From d84b6d21125f79d1a555d109cdf9d724cea03daa Mon Sep 17 00:00:00 2001 From: Rene Pietzsch Date: Tue, 16 Dec 2025 18:16:43 +0100 Subject: [PATCH 01/17] update markdownlint and editorconfig configuration add task targets add pre-commit config --- .editorconfig | 5 +++-- .markdownlint.jsonc | 39 +++++++++++++++++++++++++-------------- .pre-commit-config.yaml | 11 +++++++++++ Taskfile.yml | 25 +++++++++++++++++++++++++ 4 files changed, 64 insertions(+), 16 deletions(-) create mode 100644 .pre-commit-config.yaml diff --git a/.editorconfig b/.editorconfig index a37a0f9bf..c0719870d 100644 --- a/.editorconfig +++ b/.editorconfig @@ -16,8 +16,9 @@ trim_trailing_whitespace = true indent_style = space indent_size = 2 -# 4 space indentation -[*.md] +# Matches multiple files with brace expansion notation +# Set default charset +[*.{md}] indent_style = space indent_size = 4 diff --git a/.markdownlint.jsonc b/.markdownlint.jsonc index 940758cae..947843271 100644 --- a/.markdownlint.jsonc +++ b/.markdownlint.jsonc @@ -1,34 +1,45 @@ { // Example markdownlint JSON(C) configuration with all properties set to their default value - // Default state for all rules "default": true, - // not enforcing line break a specific lenght "MD013": false, - "MD007": { "indent": 4 }, - // allow Multiple headings with the same content "MD024": false, - + // allow multiple top level headings -> generated files + "MD025": false, // to be consistent with prettier "MD030": { - "ul_single": 3, - "ul_multi": 3 + "ul_single": 1, + "ul_multi": 1 }, - // allow some html "MD033": { - "allowed_elements": ["figure", "figcaption", "div"] + "allowed_elements": [ + "figure", + "figcaption", + "div", + "details", + "summary", + "a", + "p", + "img", + "span", + "br" + ] }, - + // not enfore h1 as first content of a file + "MD041": false, // allow missing img alt text "MD045": false, - // disabled due to admonition / tabs using indented blocks - "MD046": false - //"MD046": { "style": "fenced" } -} + "MD046": false, + 
//"MD046": { "style": "fenced" }, + // allow non-descriptive link text, like "here" + "MD059": false, + // relax table formatting rules + "MD060": false +} \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000..9f700f269 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,11 @@ + +--- +repos: + - repo: local + hooks: + + - id: markdownlint + name: check:markdownlint + entry: task check:markdownlint + language: python + pass_filenames: false diff --git a/Taskfile.yml b/Taskfile.yml index 8766ba709..cf3e29b87 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -180,3 +180,28 @@ tasks: cmds: - poetry run mike serve -b {{.PUBLIC_BRANCH}} + format:fix: + desc: markdownlit md files and apply possible style fixes + cmds: + - | + { + # recurse + find ./docs -type f -name '*.md' -print0 + + # no recursion (top-level only) + find . -maxdepth 1 -type f -name '*.md' -print0 + } | xargs -0 markdownlint --config .markdownlint.jsonc --fix + + check:markdownlint: + desc: run markdownlint on md files for style issues + cmds: + #- mkdir -p ./dist + - | + { + # recurse + find ./docs -type f -name '*.md' -print0 + + # no recursion (top-level only) + find . 
-maxdepth 1 -type f -name '*.md' -print0 + } | xargs -0 markdownlint --config .markdownlint.jsonc + # --json --output ./dist/markdownlint-issues.json From 6f95faaceee9d9442e0abb39cb8ae2dc9ad88136 Mon Sep 17 00:00:00 2001 From: Rene Pietzsch Date: Tue, 16 Dec 2025 18:17:35 +0100 Subject: [PATCH 02/17] normalize (--fix) all markdown files according to linter configuration --- CONTRIBUTING.md | 84 +-- README.md | 9 +- .../command-reference/admin/acl/index.md | 50 +- .../command-reference/admin/client/index.md | 24 +- .../command-reference/admin/index.md | 22 +- .../command-reference/admin/metrics/index.md | 25 +- .../admin/migration/index.md | 16 +- .../command-reference/admin/store/index.md | 40 +- .../command-reference/admin/user/index.md | 48 +- .../admin/workspace/index.md | 21 +- .../admin/workspace/python/index.md | 47 +- .../command-reference/config/index.md | 39 +- .../command-reference/dataset/index.md | 75 +- .../dataset/resource/index.md | 29 +- .../command-reference/graph/imports/index.md | 33 +- .../command-reference/graph/index.md | 49 +- .../command-reference/graph/insights/index.md | 36 +- .../graph/validation/index.md | 42 +- .../command-reference/index.md | 1 - .../command-reference/project/file/index.md | 53 +- .../command-reference/project/index.md | 64 +- .../project/variable/index.md | 44 +- .../command-reference/query/index.md | 48 +- .../vocabulary/cache/index.md | 14 +- .../command-reference/vocabulary/index.md | 37 +- .../command-reference/workflow/index.md | 35 +- .../workflow/scheduler/index.md | 36 +- .../index.md | 2 +- .../configuration/completion-setup/index.md | 9 +- .../environment-based-configuration/index.md | 22 +- .../file-based-configuration/index.md | 2 - .../index.md | 13 +- .../configuration/index.md | 9 +- .../cmemc-command-line-interface/index.md | 13 +- .../installation/index.md | 5 +- .../invocation/docker-image/index.md | 1 - .../invocation/github-action/index.md | 1 - .../invocation/gitlab-pipeline/index.md | 1 - 
.../invocation/index.md | 10 +- .../invocation/sparql-scripts/index.md | 3 +- .../troubleshooting-and-caveats/index.md | 1 - .../index.md | 1 - docs/automate/continuous-integration/index.md | 5 +- docs/automate/index.md | 9 +- docs/automate/scheduling-workflows/index.md | 2 - docs/build/active-learning/index.md | 18 +- .../build/define-prefixes-namespaces/index.md | 20 +- docs/build/evaluate-template/index.md | 28 +- .../extracting-data-from-a-web-api/index.md | 3 +- docs/build/index.md | 15 +- docs/build/integrations/index.md | 142 ++-- docs/build/kafka-consumer/index.md | 34 +- .../index.md | 16 +- .../index.md | 45 +- .../index.md | 38 +- docs/build/mapping-creator/index.md | 20 +- docs/build/reference/aggregator/average.md | 9 +- .../reference/aggregator/firstNonEmpty.md | 7 +- .../reference/aggregator/geometricMean.md | 12 +- .../aggregator/handleMissingValues.md | 18 +- docs/build/reference/aggregator/max.md | 10 +- docs/build/reference/aggregator/min.md | 10 +- docs/build/reference/aggregator/negate.md | 5 +- .../reference/aggregator/quadraticMean.md | 13 +- docs/build/reference/aggregator/scale.md | 19 +- .../reference/customtask/CancelWorkflow.md | 15 +- .../reference/customtask/ConcatenateToFile.md | 20 - .../customtask/CustomSQLExecution.md | 9 +- docs/build/reference/customtask/DistinctBy.md | 11 +- .../customtask/JsonParserOperator.md | 15 +- docs/build/reference/customtask/Merge.md | 5 +- .../reference/customtask/MultiTableMerge.md | 15 +- docs/build/reference/customtask/Pivot.md | 19 +- docs/build/reference/customtask/Scheduler.md | 17 +- .../reference/customtask/SearchAddresses.md | 15 - docs/build/reference/customtask/SendEMail.md | 40 - .../reference/customtask/SparkFunction.md | 13 +- docs/build/reference/customtask/Template.md | 20 +- docs/build/reference/customtask/Unpivot.md | 17 +- .../reference/customtask/XmlParserOperator.md | 13 +- .../reference/customtask/addProjectFiles.md | 13 +- .../customtask/cmem-plugin-jq-workflow.md | 14 +- 
.../cmem_plugin_auth-workflow-auth-OAuth2.md | 17 +- .../cmem_plugin_graph_insights-Update.md | 8 - ..._graphql-workflow-graphql-GraphQLPlugin.md | 14 - ...in_irdi-workflow-irdi_plugin-IrdiPlugin.md | 40 +- .../customtask/cmem_plugin_jira-JqlQuery.md | 28 +- .../cmem_plugin_kafka-ReceiveMessages.md | 33 - .../cmem_plugin_kafka-SendMessages.md | 29 - .../cmem_plugin_kubernetes-Execute.md | 21 +- .../cmem_plugin_llm-CreateEmbeddings.md | 26 - .../cmem_plugin_llm-ExecuteInstructions.md | 45 +- .../cmem_plugin_loopwf-task-StartWorkflow.md | 16 - .../customtask/cmem_plugin_mattermost.md | 19 +- .../cmem_plugin_nextcloud-Download.md | 26 +- .../customtask/cmem_plugin_nextcloud-List.md | 25 +- .../cmem_plugin_nextcloud-Upload.md | 19 +- .../cmem_plugin_office365-Download.md | 27 +- .../customtask/cmem_plugin_office365-List.md | 27 +- .../cmem_plugin_office365-Upload.md | 23 +- ...cmem_plugin_parameters-ParametersPlugin.md | 10 +- ...ugin_pdf_extract-pdf_extract-PdfExtract.md | 35 +- .../customtask/cmem_plugin_pgvector-Search.md | 25 - .../customtask/cmem_plugin_pgvector-Store.md | 27 - .../cmem_plugin_project_resources-List.md | 7 +- ...ugin_project_resources-UploadLocalFiles.md | 11 - .../cmem_plugin_salesforce-SoqlQuery.md | 20 +- ...force-workflow-operations-SobjectCreate.md | 24 +- ...lugin_shapes-plugin_shapes-ShapesPlugin.md | 31 +- ...itfile-plugin_splitfile-SplitFilePlugin.md | 18 - .../customtask/cmem_plugin_ssh-Download.md | 106 +-- .../customtask/cmem_plugin_ssh-Execute.md | 105 +-- .../customtask/cmem_plugin_ssh-List.md | 106 +-- .../customtask/cmem_plugin_ssh-Upload.md | 68 +- ...in_validation-validate-ValidateEntities.md | 20 - ...lugin_validation-validate-ValidateGraph.md | 26 +- .../cmem_plugin_wfreports_get_report.md | 11 +- .../customtask/cmem_plugin_yaml-parse.md | 27 +- .../build/reference/customtask/combine-csv.md | 14 +- .../customtask/deleteProjectFiles.md | 11 +- .../reference/customtask/downloadFile.md | 23 +- 
...ataPlatformGraphStoreFileUploadOperator.md | 13 +- .../customtask/eccencaRestOperator.md | 63 +- .../reference/customtask/getProjectFiles.md | 13 +- .../reference/customtask/setParameters.md | 11 +- .../reference/customtask/shacl-pyshacl.md | 51 +- .../customtask/sparqlCopyOperator.md | 11 +- .../customtask/sparqlSelectOperator.md | 15 +- .../customtask/sparqlUpdateOperator.md | 15 +- .../customtask/sqlUpdateQueryOperator.md | 9 +- .../customtask/tripleRequestOperator.md | 5 +- .../customtask/ucumNormalizationTask.md | 22 +- .../customtask/validateXsdOperator.md | 9 +- .../reference/customtask/xsltOperator.md | 35 +- docs/build/reference/dataset/Hive.md | 19 +- docs/build/reference/dataset/Jdbc.md | 67 +- .../reference/dataset/LocalInternalDataset.md | 5 +- docs/build/reference/dataset/SnowflakeJdbc.md | 43 -- docs/build/reference/dataset/alignment.md | 11 +- docs/build/reference/dataset/avro.md | 17 +- docs/build/reference/dataset/binaryFile.md | 12 +- docs/build/reference/dataset/csv.md | 38 +- .../reference/dataset/eccencaDataPlatform.md | 97 +-- docs/build/reference/dataset/excel.md | 16 - docs/build/reference/dataset/file.md | 16 - .../reference/dataset/googlespreadsheet.md | 15 - docs/build/reference/dataset/inMemory.md | 9 +- docs/build/reference/dataset/index.md | 2 +- docs/build/reference/dataset/internal.md | 9 +- docs/build/reference/dataset/json.md | 25 +- docs/build/reference/dataset/multiCsv.md | 34 +- docs/build/reference/dataset/neo4j.md | 21 +- .../reference/dataset/office365preadsheet.md | 18 +- docs/build/reference/dataset/orc.md | 19 +- docs/build/reference/dataset/parquet.md | 19 +- docs/build/reference/dataset/sparkView.md | 23 +- .../build/reference/dataset/sparqlEndpoint.md | 38 +- docs/build/reference/dataset/sqlEndpoint.md | 17 +- docs/build/reference/dataset/text.md | 18 +- docs/build/reference/dataset/xml.md | 22 +- .../PhysicalQuantitiesDistance.md | 120 ++- .../distancemeasure/cjkReadingDistance.md | 11 +- 
.../distancemeasure/constantDistance.md | 9 +- .../build/reference/distancemeasure/cosine.md | 9 +- docs/build/reference/distancemeasure/date.md | 68 +- .../reference/distancemeasure/dateTime.md | 5 +- docs/build/reference/distancemeasure/dice.md | 5 +- .../reference/distancemeasure/equality.md | 18 +- .../reference/distancemeasure/greaterThan.md | 12 +- .../reference/distancemeasure/inequality.md | 28 +- .../distancemeasure/insideNumericInterval.md | 9 +- .../reference/distancemeasure/isSubstring.md | 9 +- .../reference/distancemeasure/jaccard.md | 28 +- docs/build/reference/distancemeasure/jaro.md | 7 +- .../reference/distancemeasure/jaroWinkler.md | 5 +- .../distancemeasure/koreanPhonemeDistance.md | 11 +- .../distancemeasure/koreanTranslitDistance.md | 11 +- .../reference/distancemeasure/levenshtein.md | 51 +- .../distancemeasure/levenshteinDistance.md | 46 +- .../reference/distancemeasure/lowerThan.md | 12 +- docs/build/reference/distancemeasure/num.md | 8 +- .../distancemeasure/numericEquality.md | 38 +- .../build/reference/distancemeasure/qGrams.md | 48 +- .../distancemeasure/relaxedEquality.md | 5 +- .../reference/distancemeasure/softjaccard.md | 9 +- .../reference/distancemeasure/startsWith.md | 13 +- .../distancemeasure/substringDistance.md | 9 +- .../distancemeasure/tokenwiseDistance.md | 26 +- docs/build/reference/distancemeasure/wgs84.md | 9 +- docs/build/reference/index.md | 12 +- .../reference/transformer/Combine/concat.md | 39 +- .../transformer/Combine/concatMultiValues.md | 42 +- .../transformer/Combine/concatPairwise.md | 21 +- .../reference/transformer/Combine/merge.md | 8 +- .../transformer/Conditional/containsAllOf.md | 13 +- .../transformer/Conditional/containsAnyOf.md | 13 +- .../transformer/Conditional/ifContains.md | 19 +- .../transformer/Conditional/ifExists.md | 9 +- .../transformer/Conditional/ifMatchesRegex.md | 27 +- .../Conditional/negateTransformer.md | 9 +- .../transformer/Conversion/convertCharset.md | 11 +- 
.../transformer/Date/compareDates.md | 22 +- .../reference/transformer/Date/currentDate.md | 5 +- .../transformer/Date/datetoTimestamp.md | 20 +- .../reference/transformer/Date/duration.md | 5 +- .../transformer/Date/durationInDays.md | 5 +- .../transformer/Date/durationInSeconds.md | 5 +- .../transformer/Date/durationInYears.md | 5 +- .../transformer/Date/numberToDuration.md | 9 +- .../reference/transformer/Date/parseDate.md | 39 +- .../reference/transformer/Date/timeToDate.md | 27 +- .../reference/transformer/Excel/Excel_ABS.md | 8 +- .../reference/transformer/Excel/Excel_ACOS.md | 8 +- .../transformer/Excel/Excel_ACOSH.md | 8 +- .../reference/transformer/Excel/Excel_AND.md | 8 +- .../reference/transformer/Excel/Excel_ASIN.md | 8 +- .../transformer/Excel/Excel_ASINH.md | 8 +- .../reference/transformer/Excel/Excel_ATAN.md | 8 +- .../transformer/Excel/Excel_ATAN2.md | 8 +- .../transformer/Excel/Excel_ATANH.md | 8 +- .../transformer/Excel/Excel_AVEDEV.md | 8 +- .../transformer/Excel/Excel_AVERAGE.md | 8 +- .../transformer/Excel/Excel_AVERAGEA.md | 8 +- .../transformer/Excel/Excel_CEILING.md | 8 +- .../transformer/Excel/Excel_CHOOSE.md | 8 +- .../transformer/Excel/Excel_CLEAN.md | 8 +- .../reference/transformer/Excel/Excel_CODE.md | 8 +- .../transformer/Excel/Excel_COMBIN.md | 8 +- .../transformer/Excel/Excel_CORREL.md | 8 +- .../reference/transformer/Excel/Excel_COS.md | 8 +- .../reference/transformer/Excel/Excel_COSH.md | 8 +- .../transformer/Excel/Excel_COUNT.md | 8 +- .../transformer/Excel/Excel_COUNTA.md | 8 +- .../transformer/Excel/Excel_COVAR.md | 8 +- .../transformer/Excel/Excel_DEGREES.md | 8 +- .../transformer/Excel/Excel_DEVSQ.md | 8 +- .../reference/transformer/Excel/Excel_EVEN.md | 8 +- .../transformer/Excel/Excel_EXACT.md | 8 +- .../reference/transformer/Excel/Excel_EXP.md | 8 +- .../reference/transformer/Excel/Excel_FACT.md | 8 +- .../transformer/Excel/Excel_FALSE.md | 8 +- .../reference/transformer/Excel/Excel_FIND.md | 8 +- 
.../transformer/Excel/Excel_FLOOR.md | 8 +- .../transformer/Excel/Excel_FORECAST.md | 8 +- .../reference/transformer/Excel/Excel_FV.md | 8 +- .../transformer/Excel/Excel_GEOMEAN.md | 8 +- .../reference/transformer/Excel/Excel_IF.md | 8 +- .../reference/transformer/Excel/Excel_INT.md | 8 +- .../transformer/Excel/Excel_INTERCEPT.md | 8 +- .../reference/transformer/Excel/Excel_IPMT.md | 8 +- .../reference/transformer/Excel/Excel_IRR.md | 8 +- .../transformer/Excel/Excel_LARGE.md | 8 +- .../reference/transformer/Excel/Excel_LEFT.md | 8 +- .../reference/transformer/Excel/Excel_LN.md | 8 +- .../reference/transformer/Excel/Excel_LOG.md | 8 +- .../transformer/Excel/Excel_LOG10.md | 8 +- .../reference/transformer/Excel/Excel_MAX.md | 8 +- .../reference/transformer/Excel/Excel_MAXA.md | 8 +- .../transformer/Excel/Excel_MEDIAN.md | 8 +- .../reference/transformer/Excel/Excel_MID.md | 8 +- .../reference/transformer/Excel/Excel_MIN.md | 8 +- .../reference/transformer/Excel/Excel_MINA.md | 8 +- .../reference/transformer/Excel/Excel_MIRR.md | 8 +- .../reference/transformer/Excel/Excel_MOD.md | 8 +- .../reference/transformer/Excel/Excel_MODE.md | 8 +- .../transformer/Excel/Excel_NORMDIST.md | 8 +- .../transformer/Excel/Excel_NORMINV.md | 8 +- .../transformer/Excel/Excel_NORMSDIST.md | 8 +- .../transformer/Excel/Excel_NORMSINV.md | 8 +- .../reference/transformer/Excel/Excel_NOT.md | 8 +- .../reference/transformer/Excel/Excel_NPER.md | 8 +- .../reference/transformer/Excel/Excel_NPV.md | 8 +- .../reference/transformer/Excel/Excel_ODD.md | 8 +- .../reference/transformer/Excel/Excel_OR.md | 8 +- .../transformer/Excel/Excel_PEARSON.md | 8 +- .../transformer/Excel/Excel_PERCENTILE.md | 8 +- .../transformer/Excel/Excel_PERCENTRANK.md | 8 +- .../reference/transformer/Excel/Excel_PI.md | 8 +- .../reference/transformer/Excel/Excel_PMT.md | 8 +- .../transformer/Excel/Excel_POISSON.md | 8 +- .../transformer/Excel/Excel_POWER.md | 8 +- .../reference/transformer/Excel/Excel_PPMT.md | 8 +- 
.../transformer/Excel/Excel_PRODUCT.md | 8 +- .../transformer/Excel/Excel_PROPER.md | 8 +- .../reference/transformer/Excel/Excel_PV.md | 8 +- .../transformer/Excel/Excel_RADIANS.md | 8 +- .../reference/transformer/Excel/Excel_RAND.md | 8 +- .../reference/transformer/Excel/Excel_RANK.md | 8 +- .../reference/transformer/Excel/Excel_RATE.md | 8 +- .../transformer/Excel/Excel_REPLACE.md | 8 +- .../reference/transformer/Excel/Excel_REPT.md | 8 +- .../transformer/Excel/Excel_RIGHT.md | 8 +- .../transformer/Excel/Excel_ROMAN.md | 8 +- .../transformer/Excel/Excel_ROUND.md | 8 +- .../transformer/Excel/Excel_ROUNDDOWN.md | 8 +- .../transformer/Excel/Excel_ROUNDUP.md | 8 +- .../transformer/Excel/Excel_SEARCH.md | 8 +- .../reference/transformer/Excel/Excel_SIGN.md | 8 +- .../reference/transformer/Excel/Excel_SIN.md | 8 +- .../reference/transformer/Excel/Excel_SINH.md | 8 +- .../transformer/Excel/Excel_SLOPE.md | 8 +- .../transformer/Excel/Excel_SMALL.md | 8 +- .../reference/transformer/Excel/Excel_SQRT.md | 8 +- .../transformer/Excel/Excel_STANDARDIZE.md | 8 +- .../transformer/Excel/Excel_STDEV.md | 8 +- .../transformer/Excel/Excel_STDEVA.md | 8 +- .../transformer/Excel/Excel_STDEVP.md | 8 +- .../transformer/Excel/Excel_STDEVPA.md | 8 +- .../transformer/Excel/Excel_SUBSTITUTE.md | 8 +- .../reference/transformer/Excel/Excel_SUM.md | 8 +- .../transformer/Excel/Excel_SUMPRODUCT.md | 8 +- .../transformer/Excel/Excel_SUMSQ.md | 8 +- .../transformer/Excel/Excel_SUMX2MY2.md | 8 +- .../transformer/Excel/Excel_SUMX2PY2.md | 8 +- .../transformer/Excel/Excel_SUMXMY2.md | 8 +- .../reference/transformer/Excel/Excel_TAN.md | 8 +- .../reference/transformer/Excel/Excel_TANH.md | 8 +- .../transformer/Excel/Excel_TDIST.md | 8 +- .../reference/transformer/Excel/Excel_TRUE.md | 8 +- .../transformer/Excel/Excel_TRUNC.md | 8 +- .../reference/transformer/Excel/Excel_VAR.md | 8 +- .../reference/transformer/Excel/Excel_VARA.md | 8 +- .../reference/transformer/Excel/Excel_VARP.md | 8 +- 
.../transformer/Excel/Excel_VARPA.md | 8 +- .../transformer/Extract/regexExtract.md | 32 +- .../transformer/Filter/filterByLength.md | 11 +- .../transformer/Filter/filterByRegex.md | 11 +- .../Filter/removeDefaultStopWords.md | 8 +- .../transformer/Filter/removeEmptyValues.md | 8 +- .../Filter/removeRemoteStopWords.md | 26 +- .../transformer/Filter/removeStopWords.md | 26 +- .../transformer/Filter/removeValues.md | 9 +- .../transformer/Geo/RetrieveCoordinates.md | 7 - .../transformer/Geo/RetrieveLatitude.md | 7 - .../transformer/Geo/RetrieveLongitude.md | 7 - .../transformer/Linguistic/NYSIIS.md | 19 +- .../transformer/Linguistic/metaphone.md | 5 +- .../transformer/Linguistic/normalizeChars.md | 5 +- .../transformer/Linguistic/soundex.md | 19 +- .../reference/transformer/Linguistic/stem.md | 5 +- .../transformer/Metadata/fileHash.md | 11 +- .../Metadata/inputFileAttributes.md | 9 +- .../Metadata/inputTaskAttributes.md | 9 +- .../transformer/Normalize/alphaReduce.md | 5 +- .../transformer/Normalize/camelCase.md | 22 +- .../transformer/Normalize/capitalize.md | 18 +- .../transformer/Normalize/htmlCleaner.md | 17 +- .../transformer/Normalize/lowerCase.md | 7 +- .../transformer/Normalize/removeBlanks.md | 5 +- .../transformer/Normalize/removeDuplicates.md | 5 +- .../Normalize/removeParentheses.md | 5 +- .../Normalize/removeSpecialChars.md | 5 +- .../transformer/Normalize/sortWords.md | 27 +- .../reference/transformer/Normalize/trim.md | 5 +- .../transformer/Normalize/upperCase.md | 5 +- .../reference/transformer/Normalize/uriFix.md | 26 +- .../transformer/Normalize/urlEncode.md | 16 +- .../Numeric/PhysicalQuantitiesNormalizer.md | 125 ++- .../transformer/Numeric/aggregateNumbers.md | 32 +- .../Numeric/cmem-plugin-number-conversion.md | 8 +- .../transformer/Numeric/compareNumbers.md | 9 +- .../Numeric/extractPhysicalQuantity.md | 15 +- .../transformer/Numeric/formatNumber.md | 36 +- .../reference/transformer/Numeric/log.md | 9 +- .../transformer/Numeric/numOperation.md 
| 24 +- .../transformer/Numeric/numReduce.md | 18 +- .../transformer/Parser/DateTypeParser.md | 73 +- .../transformer/Parser/FloatTypeParser.md | 13 +- .../transformer/Parser/GeoCoordinateParser.md | 5 +- .../transformer/Parser/GeoLocationParser.md | 11 +- .../transformer/Parser/IntegerParser.md | 28 +- .../transformer/Parser/IsinParser.md | 5 +- .../transformer/Parser/SkosTypeParser.md | 9 +- .../transformer/Parser/StringParser.md | 8 +- .../reference/transformer/Replace/excelMap.md | 18 +- .../reference/transformer/Replace/map.md | 26 +- .../Replace/mapWithDefaultInput.md | 16 +- .../transformer/Replace/regexReplace.md | 30 +- .../reference/transformer/Replace/replace.md | 26 +- .../transformer/Selection/coalesce.md | 12 +- .../transformer/Selection/regexSelect.md | 18 +- .../reference/transformer/Sequence/count.md | 8 +- .../transformer/Sequence/getValueByIndex.md | 13 +- .../reference/transformer/Sequence/sort.md | 9 +- .../transformer/Sequence/toSequenceIndex.md | 7 +- .../transformer/Substring/stripPostfix.md | 18 +- .../transformer/Substring/stripPrefix.md | 18 +- .../transformer/Substring/stripUriPrefix.md | 22 +- .../transformer/Substring/substring.md | 40 +- .../transformer/Substring/untilCharacter.md | 18 +- .../Template/TemplateTransformer.md | 36 +- .../Tokenization/camelcasetokenizer.md | 8 +- .../transformer/Tokenization/tokenize.md | 18 +- .../Uncategorized/cmem-plugin-jq-transform.md | 14 +- .../cmem_plugin_currencies-transform.md | 13 - .../Validation/validateDateAfter.md | 20 +- .../Validation/validateDateRange.md | 11 +- .../Validation/validateNumberOfValues.md | 26 +- .../Validation/validateNumericRange.md | 11 +- .../transformer/Validation/validateRegex.md | 22 +- .../transformer/Value/cmem-plugin-ulid.md | 10 +- .../cmem_plugin_uuid-plugin_uuid-UUID1.md | 11 +- ...em_plugin_uuid-plugin_uuid-UUID1ToUUID6.md | 4 +- .../cmem_plugin_uuid-plugin_uuid-UUID3.md | 8 +- .../cmem_plugin_uuid-plugin_uuid-UUID4.md | 2 +- 
.../cmem_plugin_uuid-plugin_uuid-UUID5.md | 8 +- .../cmem_plugin_uuid-plugin_uuid-UUID6.md | 10 +- .../cmem_plugin_uuid-plugin_uuid-UUID7.md | 3 +- .../cmem_plugin_uuid-plugin_uuid-UUID8.md | 3 +- ...mem_plugin_uuid-plugin_uuid-UUIDConvert.md | 8 +- ...mem_plugin_uuid-plugin_uuid-UUIDVersion.md | 2 +- .../reference/transformer/Value/constant.md | 17 +- .../transformer/Value/constantUri.md | 9 +- .../transformer/Value/datasetParameter.md | 15 +- .../transformer/Value/defaultValue.md | 18 +- .../reference/transformer/Value/emptyValue.md | 5 +- .../reference/transformer/Value/inputHash.md | 17 +- .../transformer/Value/randomNumber.md | 15 +- .../transformer/Value/readParameter.md | 11 +- .../build/reference/transformer/Value/uuid.md | 9 +- docs/build/reference/transformer/index.md | 2 +- docs/build/snowflake-tutorial/index.md | 99 ++- .../define-the-interfaces/index.md | 66 +- .../define-the-need/index.md | 22 +- .../index.md | 710 +++++++++--------- .../index.md | 38 +- .../link-IDS-event-to-KG-via-cmem/index.md | 69 +- .../link-IDS-event-to-KG/index.md | 40 +- docs/build/variables/index.md | 11 +- docs/build/workflow-reconfiguration/index.md | 1 - .../consume-graphs-in-apache-kafka/index.md | 1 - .../consuming-graphs-in-power-bi/index.md | 6 +- .../consuming-graphs-with-redash/index.md | 1 - docs/consume/index.md | 14 +- docs/consume/populate-data-to-neo4j/index.md | 9 +- .../index.md | 1 - .../configuration/access-conditions/index.md | 5 +- .../configuration/caveats/index.md | 6 +- .../activity-reference/index.md | 48 -- .../configuration/dataintegration/index.md | 11 +- .../docker-orchestration/index.md | 18 +- .../explore/dataplatform/application-full.md | 133 +--- .../dataplatform/application-graphdb-full.md | 5 +- .../dataplatform/application-http-full.md | 9 +- .../dataplatform/application-inmemory-full.md | 7 +- .../dataplatform/application-neptune-full.md | 12 +- .../dataplatform/application-oauth-full.md | 21 +- .../dataplatform/application-virtuoso-full.md 
| 4 +- .../explore/dataplatform/index.md | 16 +- .../explore/graph-resource-pattern/index.md | 4 +- .../configuration/explore/index.md | 27 +- .../configuration/graphinsights/index.md | 32 +- .../change-passwords-and-keys/index.md | 1 - .../configuration/keycloak/index.md | 176 ++--- .../keycloak/using-external-keycloak/index.md | 6 +- .../index.md | 5 +- .../production-ready-settings/index.md | 4 +- .../quad-store-configuration/index.md | 26 +- .../configuration/reverse-proxy/index.md | 12 +- docs/deploy-and-configure/index.md | 9 +- .../installation/migrating-stores/index.md | 2 +- .../index.md | 13 +- .../requirements/graph-insights-sizing.md | 10 +- .../requirements/index.md | 25 +- .../system-architecture/index.md | 8 +- .../index.md | 1 - docs/develop/cmemc-scripts/index.md | 2 - docs/develop/cmempy-python-api/index.md | 2 +- docs/develop/dataintegration-apis/index.md | 1 - docs/develop/dataplatform-apis/index.md | 18 +- docs/develop/index.md | 6 +- .../python-plugins/development/index.md | 44 +- docs/develop/python-plugins/index.md | 6 +- .../python-plugins/installation/index.md | 3 +- docs/develop/python-plugins/setup/index.md | 23 +- .../charts-catalog/index.md | 16 +- docs/explore-and-author/companion/index.md | 24 +- .../datatype-reference/index.md | 46 +- .../node-shapes/index.md | 52 -- .../property-shapes/index.md | 73 +- .../workflow-trigger/index.md | 13 +- .../graph-exploration/index.md | 18 +- .../statement-annotations/index.md | 13 +- .../versioning-of-graph-changes/index.md | 1 - docs/explore-and-author/index.md | 9 +- docs/explore-and-author/link-rules/index.md | 98 +-- .../thesauri-management/index.md | 4 +- .../vocabulary-catalog/index.md | 10 +- docs/getting-started/index.md | 40 +- .../with-your-sandbox/index.md | 32 +- .../with-your-sandbox/material.md | 8 +- docs/index.md | 19 +- .../corporate-memory-19-10/index.md | 145 ++-- .../corporate-memory-20-03/index.md | 7 +- .../corporate-memory-20-06/index.md | 1 - 
.../corporate-memory-20-10/index.md | 11 +- .../corporate-memory-20-12/index.md | 4 +- .../corporate-memory-21-02/index.md | 1 - .../corporate-memory-21-04/index.md | 1 - .../corporate-memory-21-06/index.md | 6 +- .../corporate-memory-21-11/index.md | 8 +- .../corporate-memory-22-1/index.md | 11 +- .../corporate-memory-22-2/index.md | 558 +++++++------- .../corporate-memory-23-1/index.md | 312 ++++---- .../corporate-memory-23-2/index.md | 294 ++++---- .../corporate-memory-23-3/index.md | 244 +++--- .../corporate-memory-24-1/index.md | 336 ++++----- .../corporate-memory-24-2/index.md | 411 +++++----- .../corporate-memory-24-3/index.md | 618 +++++++-------- .../corporate-memory-25-1/index.md | 261 ++++--- .../corporate-memory-25-2/index.md | 254 +++---- .../corporate-memory-25-3/index.md | 367 +++++---- docs/tags.md | 1 - docs/testing.md | 56 +- docs/tutorials/index.md | 1 - 515 files changed, 4388 insertions(+), 9201 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 71b4489e0..57ea9bb44 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -31,9 +31,9 @@ We suggest to use a specialized markdown editor such as [obsidian](https://obsid The following tools you need locally to get started: -- [poetry](https://python-poetry.org/) -- [task](https://taskfile.dev/) -- git, markdown editor +- [poetry](https://python-poetry.org/) +- [task](https://taskfile.dev/) +- git, markdown editor On a few OS distributions (e.g. Arch Linux) the tool/binary is named `go-task`. @@ -71,26 +71,26 @@ Have a look at the [mkdocs-material documentation](https://squidfunk.github.io/m
Extend section -- always create a directory + `index.md`, e.g. `my-topic/index.md` ([Example](https://github.com/eccenca/documentation.eccenca.com/tree/main/docs/automate/cmemc-command-line-interface)) -- add new pages to the `.pages` file to add them in the right order and with correct title to the menu ([Example](https://github.com/eccenca/documentation.eccenca.com/blob/main/docs/automate/cmemc-command-line-interface/.pages)) -- put images side by side to the `index.md` ([Example](https://github.com/eccenca/documentation.eccenca.com/tree/main/docs/release-notes/corporate-memory-22-1)) -- do not use images for icons esp. icons from the application - - use eccenca icons, e.g. [:eccenca-application-queries:](https://github.com/eccenca/documentation.eccenca.com/blob/main/overrides/.icons/eccenca/application-queries.svg) -> [list](https://github.com/eccenca/documentation.eccenca.com/tree/main/overrides/.icons/eccenca) - - use theme icons where no eccenca icon is available -> [list](https://squidfunk.github.io/mkdocs-material/reference/icons-emojis/#search) -- name image files properly (not just `Screenshot.xxx.png`, [Example](https://github.com/eccenca/documentation.eccenca.com/tree/main/docs/release-notes/corporate-memory-22-1)) -- used advanced features where suitable - - [Admonitions](https://squidfunk.github.io/mkdocs-material/reference/admonitions/#usage) (esp. use notes and warnings where needed) -> see Admonition section for more details - - [Code Blocks](https://squidfunk.github.io/mkdocs-material/reference/code-blocks/#usage) (e.g. 
enable highlightning and add a title) - - [Content Tabs](https://squidfunk.github.io/mkdocs-material/reference/content-tabs/#usage) (to structure complex pages) -- code blocks: - - do not use line numbers except you refer to it in the text - - use correct syntax highlightning (often used: `shell-session`, `bash`, `sparql`, `turtle`, `json`) -> [list of syntax IDs](https://pygments.org/docs/lexers/) - - do not confuse `shell-session` with `bash` (the first is a terminal output, the latter a script syntax) - - when using `shell-session`, use `$` as the prompt -- Links: - - do not use absolute links for internal documents, e.g. `https://documentation.eccenca.com/latest/...` - - do not use base-relative links, e.g. `/automate/...` - - use relative links to `index.md` files +- always create a directory + `index.md`, e.g. `my-topic/index.md` ([Example](https://github.com/eccenca/documentation.eccenca.com/tree/main/docs/automate/cmemc-command-line-interface)) +- add new pages to the `.pages` file to add them in the right order and with correct title to the menu ([Example](https://github.com/eccenca/documentation.eccenca.com/blob/main/docs/automate/cmemc-command-line-interface/.pages)) +- put images side by side to the `index.md` ([Example](https://github.com/eccenca/documentation.eccenca.com/tree/main/docs/release-notes/corporate-memory-22-1)) +- do not use images for icons esp. icons from the application + - use eccenca icons, e.g. 
[:eccenca-application-queries:](https://github.com/eccenca/documentation.eccenca.com/blob/main/overrides/.icons/eccenca/application-queries.svg) -> [list](https://github.com/eccenca/documentation.eccenca.com/tree/main/overrides/.icons/eccenca) + - use theme icons where no eccenca icon is available -> [list](https://squidfunk.github.io/mkdocs-material/reference/icons-emojis/#search) +- name image files properly (not just `Screenshot.xxx.png`, [Example](https://github.com/eccenca/documentation.eccenca.com/tree/main/docs/release-notes/corporate-memory-22-1)) +- used advanced features where suitable + - [Admonitions](https://squidfunk.github.io/mkdocs-material/reference/admonitions/#usage) (esp. use notes and warnings where needed) -> see Admonition section for more details + - [Code Blocks](https://squidfunk.github.io/mkdocs-material/reference/code-blocks/#usage) (e.g. enable highlightning and add a title) + - [Content Tabs](https://squidfunk.github.io/mkdocs-material/reference/content-tabs/#usage) (to structure complex pages) +- code blocks: + - do not use line numbers except you refer to it in the text + - use correct syntax highlightning (often used: `shell-session`, `bash`, `sparql`, `turtle`, `json`) -> [list of syntax IDs](https://pygments.org/docs/lexers/) + - do not confuse `shell-session` with `bash` (the first is a terminal output, the latter a script syntax) + - when using `shell-session`, use `$` as the prompt +- Links: + - do not use absolute links for internal documents, e.g. `https://documentation.eccenca.com/latest/...` + - do not use base-relative links, e.g. `/automate/...` + - use relative links to `index.md` files
@@ -104,13 +104,13 @@ On this page is search function for icons available as well.
Extend section -|Admonition Name|Used For |Example| -|---------------|---------|-------| -|Info |Information in documentation provides details about a topic or process that the reader needs to know. It is usually essential and relevant to the main subject of the document.|Statement annotations provide a way to express knowledge about statements. This group is dedicated to properties that configure the Statement Annotation feature| -|Note |A note provides additional details that may or may not be directly related to the main topic. It could be an explanation, clarification, or an aside that the writer thinks would be helpful for the reader to know.|The graph selection drop-down might or might not be visible depending the existence of an (optional) EasyNav Module configuration. In case no specific module configuration exists or non has not has been set for the current workspace the graph selection will be shown. A EasyNav Module configuration pre-configures a graph. Thus, the dropdown will not be shown if such has been configured for the current workspace.| -|Abstract|An abstract is a brief summary that provides an overview of the main points or contents of a document. It typically appears at the beginning of a document and is intended to give the reader an idea of what to expect from the document.|This tutorial explores the benefits of using cloud computing in enterprise organizations. It discusses the advantages of cloud computing over traditional on-premises infrastructure, and provides guidance for migrating to the cloud.| -|Warning|It is used to convey the seriousness of the risk and the importance of taking necessary precautions to avoid harm.|If the remote file resource is used in more than one dataset, the other datasets are also affected by this command.| -|Tip|A tip is a type of admonition in documentation that provides a helpful suggestion or best practice related to the content of the document. 
It is typically used to guide the reader towards a more efficient or effective way of using a product or service, or to provide additional insights or recommendations.|We have the suggestion option as well; click on the +icon and select the suggestion mapping.| +| Admonition Name | Used For | Example | +| --------------- | -------- | ------- | +| Info | Information in documentation provides details about a topic or process that the reader needs to know. It is usually essential and relevant to the main subject of the document. | Statement annotations provide a way to express knowledge about statements. This group is dedicated to properties that configure the Statement Annotation feature | +| Note | A note provides additional details that may or may not be directly related to the main topic. It could be an explanation, clarification, or an aside that the writer thinks would be helpful for the reader to know. | The graph selection drop-down might or might not be visible depending on the existence of an (optional) EasyNav Module configuration. In case no specific module configuration exists or none has been set for the current workspace, the graph selection will be shown. An EasyNav Module configuration pre-configures a graph. Thus, the dropdown will not be shown if such has been configured for the current workspace. | +| Abstract | An abstract is a brief summary that provides an overview of the main points or contents of a document. It typically appears at the beginning of a document and is intended to give the reader an idea of what to expect from the document. | This tutorial explores the benefits of using cloud computing in enterprise organizations. It discusses the advantages of cloud computing over traditional on-premises infrastructure, and provides guidance for migrating to the cloud. | +| Warning | It is used to convey the seriousness of the risk and the importance of taking necessary precautions to avoid harm.
| If the remote file resource is used in more than one dataset, the other datasets are also affected by this command. | +| Tip | A tip is a type of admonition in documentation that provides a helpful suggestion or best practice related to the content of the document. It is typically used to guide the reader towards a more efficient or effective way of using a product or service, or to provide additional insights or recommendations. | We have the suggestion option as well; click on the +icon and select the suggestion mapping. | |Success|Success admonitions are a type of documentation element used to highlight successful outcomes or positive results associated with a particular task, process, or feature|Graph is created successfully.| |Bug|A bug admonition include a description of the bug or issue, as well as steps that the user can take to avoid or work around the problem. It may also include information about when the bug will be fixed or patched, if applicable.|Users may experience issues with the file saving feature when running this software on Windows 10. To avoid data loss or corruption, be sure to save your work frequently and consider using an external backup device. Our development team is working to resolve this issue in the next software update.| |Example|The example admonition is typically used in instructional or educational documents to clarify complex concepts or demonstrate how to perform a specific task.|To create a new email account, click on the "Sign Up" button on the homepage and enter your name, email address, and desired password. Be sure to choose a strong password with a mix of uppercase and lowercase letters, numbers, and special characters. Once you have entered your information, click "Create Account" to complete the process.| @@ -123,14 +123,14 @@ On this page is search function for icons available as well.
Extend section -- do not use a cluttered desktop -- do not show other esp. personal project artifacts then relevant for the tutorial / feature to show -- select cropping area carefully (omit backgrounds, lines on the edges, etc.) -- use the same or a similar area for similar screens -- all relevant elements should be clearly visible and not be truncated -- irrelevant elements / details should be omitted completely and not be half visible -- crop scrollbars (they can make edges look unclean, especially if a scrollbar is directly on an edge) -- keep an equal distance of all visible elements to the edges of the screenshot +- do not use a cluttered desktop +- do not show other esp. personal project artifacts than relevant for the tutorial / feature to show +- select cropping area carefully (omit backgrounds, lines on the edges, etc.) +- use the same or a similar area for similar screens +- all relevant elements should be clearly visible and not be truncated +- irrelevant elements / details should be omitted completely and not be half visible +- crop scrollbars (they can make edges look unclean, especially if a scrollbar is directly on an edge) +- keep an equal distance of all visible elements to the edges of the screenshot
@@ -138,12 +138,12 @@ On this page is search function for icons available as well.
The following files are generated from other sources and should not be changed: - -- cmemc Command Reference: [docs/automate/cmemc-command-line-interface/command-reference](https://github.com/eccenca/documentation.eccenca.com/tree/main/docs/automate/cmemc-command-line-interface/command-reference) + +- cmemc Command Reference: [docs/automate/cmemc-command-line-interface/command-reference](https://github.com/eccenca/documentation.eccenca.com/tree/main/docs/automate/cmemc-command-line-interface/command-reference) - to update, start `task update:cmemc` -- Coporate Memory Application Icons: [overrides/.icons/eccenca](https://github.com/eccenca/documentation.eccenca.com/tree/main/overrides/.icons/eccenca) +- Corporate Memory Application Icons: [overrides/.icons/eccenca](https://github.com/eccenca/documentation.eccenca.com/tree/main/overrides/.icons/eccenca) - to update, start `task update:icons` -- Shapes and Datatypes: [docs/explore-and-author/graph-exploration/building-a-customized-user-interface/node-shapes|property-shapes|datatype-reference](https://github.com/eccenca/documentation.eccenca.com/tree/main/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/node-shapes) +- Shapes and Datatypes: [docs/explore-and-author/graph-exploration/building-a-customized-user-interface/node-shapes|property-shapes|datatype-reference](https://github.com/eccenca/documentation.eccenca.com/tree/main/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/node-shapes) - to update, start `task update:shape-reference` Where applicable, we added the following disclaimer: `` diff --git a/README.md b/README.md index caa637d51..628d3e310 100644 --- a/README.md +++ b/README.md @@ -4,10 +4,10 @@ Shared repository of the eccenca Corporate Memory documentation. 
-| Branch | Deployment | -| :--- | :--- | -| `main` | [https://dev.documentation.eccenca.com](https://dev.documentation.eccenca.com) | -| `published` | [https://documentation.eccenca.com](https://documentation.eccenca.com) | +| Branch | Deployment | +| :----- | :--------- | +| `main` | [https://dev.documentation.eccenca.com](https://dev.documentation.eccenca.com) | +| `published` | [https://documentation.eccenca.com](https://documentation.eccenca.com) | If you consider to contribute to this project, please have a look on [CONTRIBUTING.md](https://github.com/eccenca/documentation.eccenca.com/blob/main/CONTRIBUTING.md) @@ -27,4 +27,3 @@ This work is licensed under a [Creative Commons Attribution-ShareAlike 4.0 Inter [mkdocs-shield]: https://img.shields.io/badge/Made%20with-mkdocs-brightgreen [mkdocs]: https://www.mkdocs.org/ [markdown-shield]: https://img.shields.io/badge/Made%20with-Markdown-1f425f.svg - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/admin/acl/index.md b/docs/automate/cmemc-command-line-interface/command-reference/admin/acl/index.md index 7ba38f8e7..31b464ae7 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/admin/acl/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/admin/acl/index.md @@ -13,22 +13,16 @@ List, create, delete and modify and review access conditions. With this command group, you can manage and inspect access conditions in eccenca Corporate Memory. Access conditions are identified by a URL. They grant access to knowledge graphs or actions to user or groups. - ## admin acl list List access conditions. ```shell-session title="Usage" -$ cmemc admin acl list [OPTIONS] +cmemc admin acl list [OPTIONS] ``` - - - This command retrieves and lists all access conditions, which are manageable by the current account. - - ??? 
info "Options" ```text @@ -42,18 +36,12 @@ This command retrieves and lists all access conditions, which are manageable by Inspect an access condition. ```shell-session title="Usage" -$ cmemc admin acl inspect [OPTIONS] ACCESS_CONDITION_ID +cmemc admin acl inspect [OPTIONS] ACCESS_CONDITION_ID ``` - - - !!! note access conditions can be listed by using the `acl list` command. - - - ??? info "Options" ```text @@ -65,12 +53,9 @@ $ cmemc admin acl inspect [OPTIONS] ACCESS_CONDITION_ID Create an access condition. ```shell-session title="Usage" -$ cmemc admin acl create [OPTIONS] +cmemc admin acl create [OPTIONS] ``` - - - With this command, new access conditions can be created. An access condition captures information about WHO gets access to WHAT. In order to specify WHO gets access, use the ``--user`` and / or ``--group`` options. In order to specify WHAT an account get access to, use the ``--read-graph``, ``--write-graph`` and ``--action`` options.` @@ -82,14 +67,10 @@ A special case are dynamic access conditions, based on a SPARQL query: Here you !!! note Queries for dynamic access conditions are copied into the ACL, so changing the query in the query catalog does not change it in the access condition. - ```shell-session title="Example" -$ cmemc admin acl create --group local-users --write-graph https://example.org/ +cmemc admin acl create --group local-users --write-graph https://example.org/ ``` - - - ??? info "Options" ```text @@ -132,16 +113,11 @@ $ cmemc admin acl create --group local-users --write-graph https://example.org/ Update an access condition. ```shell-session title="Usage" -$ cmemc admin acl update [OPTIONS] ACCESS_CONDITION_ID +cmemc admin acl update [OPTIONS] ACCESS_CONDITION_ID ``` - - - Given an access condition URL, you can change specific options to new values. - - ??? info "Options" ```text @@ -180,20 +156,14 @@ Given an access condition URL, you can change specific options to new values. Delete access conditions. 
```shell-session title="Usage" -$ cmemc admin acl delete [OPTIONS] [ACCESS_CONDITION_IDS]... +cmemc admin acl delete [OPTIONS] [ACCESS_CONDITION_IDS]... ``` - - - This command deletes existing access conditions from the account. !!! note Access conditions can be listed by using the `cmemc admin acs list` command. - - - ??? info "Options" ```text @@ -206,22 +176,16 @@ This command deletes existing access conditions from the account. Review grants for a given account. ```shell-session title="Usage" -$ cmemc admin acl review [OPTIONS] USER +cmemc admin acl review [OPTIONS] USER ``` - - - This command has two working modes: (1) You can review the access conditions of an actual account, (2) You can review the access conditions of an imaginary account with a set of freely added groups (what-if-scenario). The output of the command is a list of grants the account has based on your input and all access conditions loaded in the store. In addition to that, some metadata of the account is shown. - - ??? info "Options" ```text --raw Outputs raw JSON. --group TEXT Add groups to the review request (what-if-scenario). ``` - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/admin/client/index.md b/docs/automate/cmemc-command-line-interface/command-reference/admin/client/index.md index 7e8b3e674..40df05985 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/admin/client/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/admin/client/index.md @@ -18,26 +18,19 @@ Client accounts are identified by a client ID which is unique in the scope of th In case your Corporate Memory deployment does not use the default deployment layout, the following additional config variables can be used in your connection configuration: ``KEYCLOAK_BASE_URI`` defaults to `{`CMEM_BASE_URI`}/auth` and locates your Keycloak deployment; ``KEYCLOAK_REALM_ID`` defaults to `cmem` and identifies the used realm. 
- ## admin client list List client accounts. ```shell-session title="Usage" -$ cmemc admin client list [OPTIONS] +cmemc admin client list [OPTIONS] ``` - - - Outputs a list of client accounts, which can be used to get an overview as well as a reference for the other commands of the `admin client` command group. !!! note The list command only outputs clients which have a client secret. - - - ??? info "Options" ```text @@ -51,16 +44,11 @@ Outputs a list of client accounts, which can be used to get an overview as well Get or generate a new secret for a client account. ```shell-session title="Usage" -$ cmemc admin client secret [OPTIONS] CLIENT_ID +cmemc admin client secret [OPTIONS] CLIENT_ID ``` - - - This command retrieves or generates a new secret for a client account from a realm. - - ??? info "Options" ```text @@ -73,15 +61,9 @@ This command retrieves or generates a new secret for a client account from a rea Open clients in the browser. ```shell-session title="Usage" -$ cmemc admin client open [CLIENT_IDS]... +cmemc admin client open [CLIENT_IDS]... ``` - - - With this command, you can open a client in the keycloak web interface in your browser. The command accepts multiple client IDs which results in opening multiple browser tabs. - - - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/admin/index.md b/docs/automate/cmemc-command-line-interface/command-reference/admin/index.md index 2fe523f72..fcdf08852 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/admin/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/admin/index.md @@ -12,18 +12,14 @@ Import bootstrap data, backup/restore workspace or get status. This command group consists of commands for setting up and configuring eccenca Corporate Memory. - ## admin status Output health and version information. 
```shell-session title="Usage" -$ cmemc admin status [OPTIONS] +cmemc admin status [OPTIONS] ``` - - - This command outputs version and health information of the selected deployment. If the version information cannot be retrieved, UNKNOWN is shown. Additionally, this command informs you in one of these cases: (1) A warning, if the target version of your cmemc client is newer than the version of your backend. (2) A warning, if the ShapeCatalog has a different version than your Explore component. (3) An error, if your Corporate Memory license is expired (grace period). (4) A warning, if your Graph DB license will expire in less than a month. @@ -31,12 +27,9 @@ Additionally, this command informs you in one of these cases: (1) A warning, if To get status information of all configured deployments use this command in combination with parallel. ```shell-session title="Example" -$ cmemc config list | parallel --ctag cmemc -c {} admin status +cmemc config list | parallel --ctag cmemc -c {} admin status ``` - - - ??? info "Options" ```text @@ -66,23 +59,17 @@ $ cmemc config list | parallel --ctag cmemc -c {} admin status Fetch and output an access token. ```shell-session title="Usage" -$ cmemc admin token [OPTIONS] +cmemc admin token [OPTIONS] ``` - - - This command can be used to check for correct authentication as well as to use the token with wget / curl or similar standard tools: ```shell-session title="Example" -$ curl -H "Authorization: Bearer $(cmemc -c my admin token)" $(cmemc -c my config get DP_API_ENDPOINT)/api/custom/slug +curl -H "Authorization: Bearer $(cmemc -c my admin token)" $(cmemc -c my config get DP_API_ENDPOINT)/api/custom/slug ``` - Please be aware that this command can reveal secrets which you might not want to be present in log files or on the screen. - - ??? info "Options" ```text @@ -94,4 +81,3 @@ Please be aware that this command can reveal secrets which you might not want to --ttl Output information about the lifetime of the access token. 
In combination with --raw, it outputs the TTL in seconds. ``` - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/admin/metrics/index.md b/docs/automate/cmemc-command-line-interface/command-reference/admin/metrics/index.md index 569f808f8..c52c4b772 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/admin/metrics/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/admin/metrics/index.md @@ -14,24 +14,18 @@ This command group consists of commands for reading and listing internal monitor Each metric family can consist of different samples identified by labels with a name and a value (dimensions). A metric has a specific type (counter, gauge, summary and histogram) and additional metadata. -Please have a look at https://prometheus.io/docs/concepts/data_model/ for further details. - +Please have a look at for further details. ## admin metrics get Get sample data of a metric. ```shell-session title="Usage" -$ cmemc admin metrics get [OPTIONS] METRIC_ID +cmemc admin metrics get [OPTIONS] METRIC_ID ``` - - - A metric of a specific job is identified by a metric ID. Possible metric IDs of a job can be retrieved with the `metrics list` command. A metric can contain multiple samples. These samples are distinguished by labels (name and value). - - ??? info "Options" ```text @@ -56,16 +50,11 @@ A metric of a specific job is identified by a metric ID. Possible metric IDs of Inspect a metric. ```shell-session title="Usage" -$ cmemc admin metrics inspect [OPTIONS] METRIC_ID +cmemc admin metrics inspect [OPTIONS] METRIC_ID ``` - - - This command outputs the data of a metric. The first table includes basic metadata about the metric. The second table includes sample labels and values. - - ??? info "Options" ```text @@ -77,16 +66,11 @@ This command outputs the data of a metric. The first table includes basic metada List metrics for a specific job. 
```shell-session title="Usage" -$ cmemc admin metrics list [OPTIONS] +cmemc admin metrics list [OPTIONS] ``` - - - For each metric, the output table shows the metric ID, the type of the metric, a count of how many labels (label names) are describing the samples (L) and a count of how many samples are currently available for a metric (S). - - ??? info "Options" ```text @@ -97,4 +81,3 @@ For each metric, the output table shows the metric ID, the type of the metric, a --raw Outputs (sorted) JSON dict, parsed from the metrics API output. ``` - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/admin/migration/index.md b/docs/automate/cmemc-command-line-interface/command-reference/admin/migration/index.md index 0ef7ba6bb..2ac6119b8 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/admin/migration/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/admin/migration/index.md @@ -16,22 +16,16 @@ Beside an ID and a description, migration recipes have the following metadata: ' The following tags are important: `system` recipes target data structures which are needed to run the most basic functionality properly. These recipes can and should be applied after each version upgrade. `user` recipes can change user and / or customizing data. `acl` recipes migrate access condition data. `shapes` recipes migrate shape data. `config` recipes migrate configuration data. - ## admin migration list List migration recipies. ```shell-session title="Usage" -$ cmemc admin migration list [OPTIONS] +cmemc admin migration list [OPTIONS] ``` - - - This command lists all available migration recipies - - ??? info "Options" ```text @@ -48,20 +42,15 @@ This command lists all available migration recipies Execute needed migration recipes. 
```shell-session title="Usage" -$ cmemc admin migration execute [OPTIONS] [MIGRATION_ID] +cmemc admin migration execute [OPTIONS] [MIGRATION_ID] ``` - - - This command executes one or more migration recipes. Each recipe has a check method to determine if a migration is needed. In addition to that, the current component version needs to match the specified first-last-version range of the recipe. Recipes are executed ordered by first_version. Here are some argument examples, in order to see how to use this command: execute `--all` `--test-only` will list all needed migrations (but not execute them), execute `--filter` tag system will apply all migrations which target system data, execute bootstrap-data will apply bootstrap-data migration if needed. - - ??? info "Options" ```text @@ -72,4 +61,3 @@ Here are some argument examples, in order to see how to use this command: execut --test-only Only test, do not execute migrations. --id-only Lists only recipe identifier. ``` - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/admin/store/index.md b/docs/automate/cmemc-command-line-interface/command-reference/admin/store/index.md index afa877976..443245576 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/admin/store/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/admin/store/index.md @@ -13,26 +13,19 @@ Import, export and bootstrap the knowledge graph store. This command group consist of commands to administrate the knowledge graph store as a whole. - ## admin store showcase Create showcase data. ```shell-session title="Usage" -$ cmemc admin store showcase [OPTIONS] +cmemc admin store showcase [OPTIONS] ``` - - - This command creates a showcase scenario of multiple graphs including integration graphs, shapes, statement annotations, etc. !!! note There is currently no deletion mechanism for the showcase data, and you need to remove the showcase graphs manually (or just remove all graphs). - - - ??? 
info "Options" ```text @@ -49,12 +42,9 @@ This command creates a showcase scenario of multiple graphs including integratio Update/Import or remove bootstrap data. ```shell-session title="Usage" -$ cmemc admin store bootstrap [OPTIONS] +cmemc admin store bootstrap [OPTIONS] ``` - - - Use ``--import`` to import the bootstrap data needed for managing shapes and configuration objects. This will remove the old data first. Use ``--remove`` to delete bootstrap data. @@ -62,13 +52,9 @@ Use ``--remove`` to delete bootstrap data. !!! note The removal of existing bootstrap data will search for resources which are flagged with the isSystemResource property. - !!! note The import part of this command is equivalent to the 'bootstrap-data' migration recipe - - - ??? info "Options" ```text @@ -82,18 +68,13 @@ Use ``--remove`` to delete bootstrap data. Backup all knowledge graphs to a ZIP archive. ```shell-session title="Usage" -$ cmemc admin store export [OPTIONS] [BACKUP_FILE] +cmemc admin store export [OPTIONS] [BACKUP_FILE] ``` - - - The backup file is a ZIP archive containing all knowledge graphs (one Turtle file + configuration file per graph). This command will create lots of load on the server. It can take a long time to complete. - - ??? info "Options" ```text @@ -106,38 +87,27 @@ This command will create lots of load on the server. It can take a long time to Restore graphs from a ZIP archive. ```shell-session title="Usage" -$ cmemc admin store import BACKUP_FILE +cmemc admin store import BACKUP_FILE ``` - - - The backup file is a ZIP archive containing all knowledge graphs (one Turtle file + configuration file per graph). The command will load a single backup ZIP archive into the triple store by replacing all graphs with the content of the Turtle files in the archive and deleting all graphs which are not in the archive. This command will create lots of load on the server. It can take a long time to complete. 
The backup file will be transferred to the server, then unzipped and imported graph by graph. After the initial transfer the network connection is not used anymore and may be closed by proxies. This does not mean that the import failed. - - ## admin store migrate Migrate configuration resources to the current version. ```shell-session title="Usage" -$ cmemc admin store migrate [OPTIONS] +cmemc admin store migrate [OPTIONS] ``` - - - This command serves two purposes: (1) When invoked without an option, it lists all migrateable configuration resources. (2) When invoked with the ``--workspaces`` option, it migrates the workspace configurations to the current version. - - ??? info "Options" ```text --workspaces Migrate workspace configurations to the current version. ``` - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/admin/user/index.md b/docs/automate/cmemc-command-line-interface/command-reference/admin/user/index.md index ac1916833..28cca892e 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/admin/user/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/admin/user/index.md @@ -18,22 +18,16 @@ User accounts are identified by a username which unique in the scope of the used In case your Corporate Memory deployment does not use the default deployment layout, the following additional config variables can be used in your connection configuration: ``KEYCLOAK_BASE_URI`` defaults to `/auth` on ``CMEM_BASE_URI`` and locates your Keycloak deployment; ``KEYCLOAK_REALM_ID`` defaults to `cmem` and identifies the used realm. - ## admin user list List user accounts. ```shell-session title="Usage" -$ cmemc admin user list [OPTIONS] +cmemc admin user list [OPTIONS] ``` - - - Outputs a list of user accounts, which can be used to get an overview as well as a reference for the other commands of the `admin user` command group. - - ??? 
info "Options" ```text @@ -50,31 +44,22 @@ Outputs a list of user accounts, which can be used to get an overview as well as Create a user account. ```shell-session title="Usage" -$ cmemc admin user create USERNAME +cmemc admin user create USERNAME ``` - - - This command creates a new user account. !!! note The created user account has no metadata such as personal data or group assignments. In order to add these details to a user account, use the `admin user update` command. - - - ## admin user update Update a user account. ```shell-session title="Usage" -$ cmemc admin user update [OPTIONS] USERNAME +cmemc admin user update [OPTIONS] USERNAME ``` - - - This command updates metadata and group assignments of a user account. For each data value, a separate option needs to be used. All options can be combined in a single execution. @@ -82,9 +67,6 @@ For each data value, a separate option needs to be used. All options can be comb !!! note In order to assign a group to a user account, the group need to be added or imported to the realm upfront. - - - ??? info "Options" ```text @@ -100,39 +82,27 @@ For each data value, a separate option needs to be used. All options can be comb Delete a user account. ```shell-session title="Usage" -$ cmemc admin user delete USERNAME +cmemc admin user delete USERNAME ``` - - - This command deletes a user account from a realm. !!! note The deletion of a user account does not delete the assigned groups of this account, only the assignments to these groups. - - - ## admin user password Change the password of a user account. ```shell-session title="Usage" -$ cmemc admin user password [OPTIONS] USERNAME +cmemc admin user password [OPTIONS] USERNAME ``` - - - With this command, the password of a user account can be changed. The default execution mode of this command is an interactive prompt which asks for the password (twice). In order automate password changes, you can use the ``--value`` option. !!! 
warning Providing passwords on the command line can be dangerous (e.g. due to a potential exploitation in the shell history). A suggested more save way for automation is to provide the password in a variable first (e.g. with `NEW_PASS=$(pwgen -1 40)`) and use it afterward in the cmemc call: `cmemc admin user password max --value ${NEW_PASS}`. - - - ??? info "Options" ```text @@ -149,15 +119,9 @@ With this command, the password of a user account can be changed. The default ex Open user in the browser. ```shell-session title="Usage" -$ cmemc admin user open [USERNAMES]... +cmemc admin user open [USERNAMES]... ``` - - - With this command, you can open a user in the keycloak console in your browser to change them. The command accepts multiple usernames which results in opening multiple browser tabs. - - - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/admin/workspace/index.md b/docs/automate/cmemc-command-line-interface/command-reference/admin/workspace/index.md index 671ef37c6..f8d0f6f69 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/admin/workspace/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/admin/workspace/index.md @@ -10,24 +10,18 @@ tags: Import, export and reload the project workspace. - ## admin workspace export Export the complete workspace (all projects) to a ZIP file. ```shell-session title="Usage" -$ cmemc admin workspace export [OPTIONS] [FILE] +cmemc admin workspace export [OPTIONS] [FILE] ``` - - - Depending on the requested export type, this ZIP file contains either one Turtle file per project (type `rdfTurtle`) or a substructure of resource files and XML descriptions (type `xmlZip`). The file name is optional and will be generated with by the template if absent. - - ??? info "Options" ```text @@ -49,13 +43,9 @@ The file name is optional and will be generated with by the template if absent. Import the workspace from a file. 
```shell-session title="Usage" -$ cmemc admin workspace import [OPTIONS] FILE +cmemc admin workspace import [OPTIONS] FILE ``` - - - - ??? info "Options" ```text @@ -67,10 +57,5 @@ $ cmemc admin workspace import [OPTIONS] FILE Reload the workspace from the backend. ```shell-session title="Usage" -$ cmemc admin workspace reload +cmemc admin workspace reload ``` - - - - - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/admin/workspace/python/index.md b/docs/automate/cmemc-command-line-interface/command-reference/admin/workspace/python/index.md index 8d83b58a4..251d86113 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/admin/workspace/python/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/admin/workspace/python/index.md @@ -16,19 +16,14 @@ Python packages are used to extend the Build (DataIntegration) workspace with py !!! warning Installing packages from unknown sources is not recommended. Plugins are not verified for malicious code. - - ## admin workspace python install Install a python package to the workspace. ```shell-session title="Usage" -$ cmemc admin workspace python install PACKAGE +cmemc admin workspace python install PACKAGE ``` - - - This command is essentially a `pip install` in the remote python environment. You can install a package by uploading a source distribution .tar.gz file, by uploading a build distribution .whl file, or by specifying a package name, i.e., a pip requirement specifier with a package name available on pypi.org (e.g. `requests==2.27.1`). @@ -36,24 +31,16 @@ You can install a package by uploading a source distribution .tar.gz file, by up !!! note The tab-completion of this command lists only public packages from pypi.org and not from additional or changed python package repositories you may have configured on the server. - - - ## admin workspace python uninstall Uninstall a python packages from the workspace. 
```shell-session title="Usage" -$ cmemc admin workspace python uninstall [OPTIONS] [PACKAGE_NAME]... +cmemc admin workspace python uninstall [OPTIONS] [PACKAGE_NAME]... ``` - - - This command is essentially a `pip uninstall` in the remote python environment. - - ??? info "Options" ```text @@ -67,18 +54,13 @@ This command is essentially a `pip uninstall` in the remote python environment. List installed python packages. ```shell-session title="Usage" -$ cmemc admin workspace python list [OPTIONS] +cmemc admin workspace python list [OPTIONS] ``` - - - This command is essentially a `pip list` in the remote python environment. It outputs a table of python package identifiers with version information. - - ??? info "Options" ```text @@ -96,20 +78,14 @@ It outputs a table of python package identifiers with version information. List installed workspace plugins. ```shell-session title="Usage" -$ cmemc admin workspace python list-plugins [OPTIONS] +cmemc admin workspace python list-plugins [OPTIONS] ``` - - - This commands lists all discovered plugins. !!! note The plugin discovery is restricted to package prefix (`cmem-`). - - - ??? info "Options" ```text @@ -123,28 +99,17 @@ This commands lists all discovered plugins. Open a package pypi.org page in the browser. ```shell-session title="Usage" -$ cmemc admin workspace python open PACKAGE +cmemc admin workspace python open PACKAGE ``` - - - With this command, you can open the pypi.org page of a published package in your browser. From there, you can follow links, review the version history as well as the origin of the package, and read the provided documentation. - - ## admin workspace python reload Reload / Register all installed plugins. ```shell-session title="Usage" -$ cmemc admin workspace python reload +cmemc admin workspace python reload ``` - - - This command will register all installed plugins into the Build (DataIntegration) workspace. 
This command is useful, when you are installing packages into the Build Python environment without using the provided cmemc commands (e.g. by mounting a prepared filesystem in the docker container). - - - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/config/index.md b/docs/automate/cmemc-command-line-interface/command-reference/config/index.md index 17b54af1d..190479152 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/config/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/config/index.md @@ -64,12 +64,9 @@ manual. List configured connections. ```shell-session title="Usage" -$ cmemc config list +cmemc config list ``` - - - This command lists all configured connections from the currently used config file. The connection identifier can be used with the `--connection` option in order to use a specific Corporate Memory instance. @@ -77,29 +74,21 @@ The connection identifier can be used with the `--connection` option in order to In order to apply commands on more than one instance, you need to use typical unix gear such as xargs or parallel. ```shell-session title="Example" -$ cmemc config list | xargs -I % sh -c 'cmemc -c % admin status' +cmemc config list | xargs -I % sh -c 'cmemc -c % admin status' ``` - ```shell-session title="Example" -$ cmemc config list | parallel --jobs 5 cmemc -c {} admin status +cmemc config list | parallel --jobs 5 cmemc -c {} admin status ``` - - - ## config edit Edit the user-scope configuration file. ```shell-session title="Usage" -$ cmemc config edit +cmemc config edit ``` - - - - ## config get Get the value of a known cmemc configuration key. 
@@ -111,48 +100,34 @@ $ cmemc config get {cmem_base_uri|ssl_verify|requests_ca_bundle|dp_api_end id|oauth_client_secret|oauth_access_token} ``` - - - In order to automate processes such as fetching custom API data from multiple Corporate Memory instances, this command provides a way to get the value of a cmemc configuration key for the selected deployment. ```shell-session title="Example" -$ curl -H "Authorization: Bearer $(cmemc -c my admin token)" $(cmemc -c my config get DP_API_ENDPOINT)/api/custom/slug +curl -H "Authorization: Bearer $(cmemc -c my admin token)" $(cmemc -c my config get DP_API_ENDPOINT)/api/custom/slug ``` - The commands return with exit code 1 if the config key is not used in the current configuration. - - ## config eval Export all configuration values of a configuration for evaluation. ```shell-session title="Usage" -$ cmemc config eval [OPTIONS] +cmemc config eval [OPTIONS] ``` - - - The output of this command is suitable to be used by a shell's `eval` command. It will output the complete configuration as `export key="value"` statements, which allow for the preparation of a shell environment. ```shell-session title="Example" -$ eval $(cmemc -c my config eval) +eval $(cmemc -c my config eval) ``` - !!! warning Please be aware that credential details are shown in cleartext with this command. - - - ??? info "Options" ```text --unset Instead of exporting all configuration keys, this option will unset all keys. ``` - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/dataset/index.md b/docs/automate/cmemc-command-line-interface/command-reference/dataset/index.md index 3ee605aa4..57e050f15 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/dataset/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/dataset/index.md @@ -17,23 +17,16 @@ Datasets are identified by a combined key of the `PROJECT_ID` and a `DATASET_ID` !!! 
note To get a list of existing datasets, execute the `dataset list` command or use tab-completion. - - ## dataset list List available datasets. ```shell-session title="Usage" -$ cmemc dataset list [OPTIONS] +cmemc dataset list [OPTIONS] ``` - - - Output and filter a list of available datasets. Each dataset is listed with its ID, type and label. - - ??? info "Options" ```text @@ -53,24 +46,17 @@ Output and filter a list of available datasets. Each dataset is listed with its Delete datasets. ```shell-session title="Usage" -$ cmemc dataset delete [OPTIONS] [DATASET_IDS]... +cmemc dataset delete [OPTIONS] [DATASET_IDS]... ``` - - - This command deletes existing datasets in integration projects from Corporate Memory. The corresponding dataset resources will not be deleted. !!! warning Datasets will be deleted without prompting. - !!! note Datasets can be listed by using the `dataset list` command. - - - ??? info "Options" ```text @@ -91,12 +77,9 @@ This command deletes existing datasets in integration projects from Corporate Me Download the resource file of a dataset. ```shell-session title="Usage" -$ cmemc dataset download [OPTIONS] DATASET_ID OUTPUT_PATH +cmemc dataset download [OPTIONS] DATASET_ID OUTPUT_PATH ``` - - - This command downloads the file resource of a dataset to your local file system or to standard out (`-`). Note that this is not possible for dataset types such as Knowledge Graph (`eccencaDataplatform`) or SQL endpoint (`sqlEndpoint`). Without providing an output path, the output file name will be the same as the remote file resource. @@ -104,9 +87,6 @@ Without providing an output path, the output file name will be the same as the r !!! note Datasets can be listed by using the `dataset list` command. - - - ??? info "Options" ```text @@ -119,50 +99,35 @@ Without providing an output path, the output file name will be the same as the r Upload a resource file to a dataset. 
```shell-session title="Usage" -$ cmemc dataset upload DATASET_ID INPUT_PATH +cmemc dataset upload DATASET_ID INPUT_PATH ``` - - - This command uploads a file to a dataset. The content of the uploaded file replaces the remote file resource. The name of the remote file resource will not be changed. !!! warning If the remote file resource is used in more than one dataset, all of these datasets are affected by this command. - !!! warning The content of the uploaded file is not tested, so uploading a JSON file to an XML dataset will result in errors. - !!! note Datasets can be listed by using the `dataset list` command. - ```shell-session title="Example" -$ cmemc dataset upload cmem:my-dataset new-file.csv +cmemc dataset upload cmem:my-dataset new-file.csv ``` - - - ## dataset inspect Display metadata of a dataset. ```shell-session title="Usage" -$ cmemc dataset inspect [OPTIONS] DATASET_ID +cmemc dataset inspect [OPTIONS] DATASET_ID ``` - - - !!! note Datasets can be listed by using the `dataset list` command. - - - ??? info "Options" ```text @@ -174,23 +139,17 @@ $ cmemc dataset inspect [OPTIONS] DATASET_ID Create a dataset. ```shell-session title="Usage" -$ cmemc dataset create [OPTIONS] [DATASET_FILE] +cmemc dataset create [OPTIONS] [DATASET_FILE] ``` - - - Datasets are created in projects and can have associated file resources. Each dataset has a type (such as `csv`) and a list of parameters which can alter or specify the dataset behaviour. To get more information about available dataset types and associated parameters, use the `--help-types` and `--help-parameter` options. ```shell-session title="Example" -$ cmemc dataset create --project my-project --type csv my-file.csv +cmemc dataset create --project my-project --type csv my-file.csv ``` - - - ??? info "Options" ```text @@ -223,40 +182,29 @@ $ cmemc dataset create --project my-project --type csv my-file.csv Open datasets in the browser. ```shell-session title="Usage" -$ cmemc dataset open DATASET_IDS... 
+cmemc dataset open DATASET_IDS... ``` - - - With this command, you can open a dataset in the workspace in your browser. The command accepts multiple dataset IDs which results in opening multiple browser tabs. - - ## dataset update Update a dataset. ```shell-session title="Usage" -$ cmemc dataset update [OPTIONS] DATASET_ID +cmemc dataset update [OPTIONS] DATASET_ID ``` - - - With this command, you can update the configuration of an existing dataset. Similar to the `dataset create` command, you need to use configuration key/value pairs on the ``--parameter`` option. To get more information about the available configuration parameters on a dataset, use the ``--help-parameter`` option. ```shell-session title="Example" -$ cmemc dataset update my-project:my-csv -p separator ";" +cmemc dataset update my-project:my-csv -p separator ";" ``` - - - ??? info "Options" ```text @@ -271,4 +219,3 @@ $ cmemc dataset update my-project:my-csv -p separator ";" Note that this option already needs access to the instance. ``` - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/dataset/resource/index.md b/docs/automate/cmemc-command-line-interface/command-reference/dataset/resource/index.md index eb852504f..c668607b0 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/dataset/resource/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/dataset/resource/index.md @@ -15,23 +15,16 @@ File resources are identified by their paths and project IDs. !!! warning This command group is deprecated and will be removed with the next major release. Please use the `project file` command group instead. - - ## dataset resource list List available file resources. ```shell-session title="Usage" -$ cmemc dataset resource list [OPTIONS] +cmemc dataset resource list [OPTIONS] ``` - - - Outputs a table or a list of file resources. - - ??? info "Options" ```text @@ -49,16 +42,11 @@ Outputs a table or a list of file resources. 
Delete file resources. ```shell-session title="Usage" -$ cmemc dataset resource delete [OPTIONS] [RESOURCE_IDS]... +cmemc dataset resource delete [OPTIONS] [RESOURCE_IDS]... ``` - - - There are three selection mechanisms: with specific IDs - only those specified resources will be deleted; by using `--filter` - resources based on the filter type and value will be deleted; by using `--all`, which will delete all resources. - - ??? info "Options" ```text @@ -76,13 +64,9 @@ There are three selection mechanisms: with specific IDs - only those specified r Display all metadata of a file resource. ```shell-session title="Usage" -$ cmemc dataset resource inspect [OPTIONS] RESOURCE_ID +cmemc dataset resource inspect [OPTIONS] RESOURCE_ID ``` - - - - ??? info "Options" ```text @@ -94,16 +78,11 @@ $ cmemc dataset resource inspect [OPTIONS] RESOURCE_ID Display all usage data of a file resource. ```shell-session title="Usage" -$ cmemc dataset resource usage [OPTIONS] RESOURCE_ID +cmemc dataset resource usage [OPTIONS] RESOURCE_ID ``` - - - - ??? info "Options" ```text --raw Outputs raw JSON. ``` - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/graph/imports/index.md b/docs/automate/cmemc-command-line-interface/command-reference/graph/imports/index.md index 19409ee67..59bc9b6fb 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/graph/imports/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/graph/imports/index.md @@ -16,25 +16,18 @@ Graphs are identified by an IRI. Statement imports are managed by creating owl:i !!! note The get a list of existing graphs, execute the `graph list` command or use tab-completion. - - ## graph imports tree Show graph tree(s) of the imports statement hierarchy. ```shell-session title="Usage" -$ cmemc graph imports tree [OPTIONS] [IRIS]... +cmemc graph imports tree [OPTIONS] [IRIS]... ``` - - - You can output one or more trees of the import hierarchy. 
Imported graphs which do not exist are shown as `[missing: IRI]`. Imported graphs which will result in an import cycle are shown as `[ignored: IRI]`. Each graph is shown with label and IRI. - - ??? info "Options" ```text @@ -51,16 +44,11 @@ Imported graphs which do not exist are shown as `[missing: IRI]`. Imported graph List accessible graph imports statements. ```shell-session title="Usage" -$ cmemc graph imports list [OPTIONS] +cmemc graph imports list [OPTIONS] ``` - - - Graphs are identified by an IRI. Statement imports are managed by creating owl:imports statements such as "`FROM_GRAPH` owl:imports `TO_GRAPH`" in the `FROM_GRAPH`. All statements in the `TO_GRAPH` are then available in the `FROM_GRAPH`. - - ??? info "Options" ```text @@ -74,36 +62,23 @@ Graphs are identified by an IRI. Statement imports are managed by creating owl:i Add statement to import a TO_GRAPH into a FROM_GRAPH. ```shell-session title="Usage" -$ cmemc graph imports create FROM_GRAPH TO_GRAPH +cmemc graph imports create FROM_GRAPH TO_GRAPH ``` - - - Graphs are identified by an IRI. Statement imports are managed by creating owl:imports statements such as "`FROM_GRAPH` owl:imports `TO_GRAPH`" in the `FROM_GRAPH`. All statements in the `TO_GRAPH` are then available in the `FROM_GRAPH`. !!! note The get a list of existing graphs, execute the `graph list` command or use tab-completion. - - - ## graph imports delete Delete statement to import a TO_GRAPH into a FROM_GRAPH. ```shell-session title="Usage" -$ cmemc graph imports delete FROM_GRAPH TO_GRAPH +cmemc graph imports delete FROM_GRAPH TO_GRAPH ``` - - - Graphs are identified by an IRI. Statement imports are managed by creating owl:imports statements such as "`FROM_GRAPH` owl:imports `TO_GRAPH`" in the `FROM_GRAPH`. All statements in the `TO_GRAPH` are then available in the `FROM_GRAPH`. !!! note The get a list of existing graph imports, execute the `graph imports list` command or use tab-completion. 
- - - - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/graph/index.md b/docs/automate/cmemc-command-line-interface/command-reference/graph/index.md index 1080fb2db..c1cc798af 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/graph/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/graph/index.md @@ -16,23 +16,16 @@ Graphs are identified by an IRI. !!! note The get a list of existing graphs, execute the `graph list` command or use tab-completion. - - ## graph count Count triples in graph(s). ```shell-session title="Usage" -$ cmemc graph count [OPTIONS] [IRIS]... +cmemc graph count [OPTIONS] [IRIS]... ``` - - - This command lists graphs with their triple count. Counts do not include imported graphs. - - ??? info "Options" ```text @@ -45,13 +38,9 @@ This command lists graphs with their triple count. Counts do not include importe (Hidden) Deprecated: use 'graph imports tree' instead. ```shell-session title="Usage" -$ cmemc graph tree [OPTIONS] [IRIS]... +cmemc graph tree [OPTIONS] [IRIS]... ``` - - - - ??? info "Options" ```text @@ -68,13 +57,9 @@ $ cmemc graph tree [OPTIONS] [IRIS]... List accessible graphs. ```shell-session title="Usage" -$ cmemc graph list [OPTIONS] +cmemc graph list [OPTIONS] ``` - - - - ??? info "Options" ```text @@ -95,16 +80,11 @@ $ cmemc graph list [OPTIONS] Export graph(s) as NTriples to stdout (-), file or directory. ```shell-session title="Usage" -$ cmemc graph export [OPTIONS] [IRIS]... +cmemc graph export [OPTIONS] [IRIS]... ``` - - - In case of file export, data from all selected graphs will be concatenated in one file. In case of directory export, .graph and .ttl files will be created for each graph. - - ??? info "Options" ```text @@ -141,13 +121,9 @@ In case of file export, data from all selected graphs will be concatenated in on Delete graph(s) from the store. ```shell-session title="Usage" -$ cmemc graph delete [OPTIONS] [IRIS]... 
+cmemc graph delete [OPTIONS] [IRIS]... ``` - - - - ??? info "Options" ```text @@ -163,12 +139,9 @@ $ cmemc graph delete [OPTIONS] [IRIS]... Import graph(s) to the store. ```shell-session title="Usage" -$ cmemc graph import [OPTIONS] INPUT_PATH [IRI] +cmemc graph import [OPTIONS] INPUT_PATH [IRI] ``` - - - If input is a file, content will be uploaded to the graph identified with the IRI. If input is a directory and NO IRI is given, it scans for file-pairs such as `xyz.ttl` and `xyz.ttl.graph`, where `xyz.ttl` is the actual triples file and `xyz.ttl.graph` contains the graph IRI in the first line: `https://mygraph.de/xyz/`. @@ -180,9 +153,6 @@ If the ``--replace`` flag is set, the data in the graphs will be overwritten, if !!! note Directories are scanned on the first level only (not recursively). - - - ??? info "Options" ```text @@ -203,10 +173,5 @@ If the ``--replace`` flag is set, the data in the graphs will be overwritten, if Open / explore a graph in the browser. ```shell-session title="Usage" -$ cmemc graph open IRI +cmemc graph open IRI ``` - - - - - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/graph/insights/index.md b/docs/automate/cmemc-command-line-interface/command-reference/graph/insights/index.md index 4daee238d..3934666a0 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/graph/insights/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/graph/insights/index.md @@ -12,22 +12,16 @@ List, create, delete and inspect graph insight snapshots. Graph Insight Snapshots are identified by an ID. To get a list of existing snapshots, execute the `graph insights list` command or use tab-completion. - ## graph insights list List graph insight snapshots. ```shell-session title="Usage" -$ cmemc graph insights list [OPTIONS] +cmemc graph insights list [OPTIONS] ``` - - - Graph Insights Snapshots are identified by an ID. - - ??? 
info "Options" ```text @@ -44,16 +38,11 @@ Graph Insights Snapshots are identified by an ID. Delete a graph insight snapshot. ```shell-session title="Usage" -$ cmemc graph insights delete [OPTIONS] [SNAPSHOT_ID] +cmemc graph insights delete [OPTIONS] [SNAPSHOT_ID] ``` - - - Graph Insight Snapshots are identified by an ID. To get a list of existing snapshots, execute the `graph insights list` command or use tab-completion. - - ??? info "Options" ```text @@ -68,16 +57,11 @@ Graph Insight Snapshots are identified by an ID. To get a list of existing snaps Create or update a graph insight snapshot. ```shell-session title="Usage" -$ cmemc graph insights create [OPTIONS] IRI +cmemc graph insights create [OPTIONS] IRI ``` - - - Create a graph insight snapshot for a given graph. If the snapshot already exists, it is hot-swapped after re-creation. The snapshot contains only the (imported) graphs the requesting user can read. - - ??? info "Options" ```text @@ -94,16 +78,11 @@ Create a graph insight snapshot for a given graph. If the snapshot already exist Update a graph insight snapshot. ```shell-session title="Usage" -$ cmemc graph insights update [OPTIONS] [SNAPSHOT_ID] +cmemc graph insights update [OPTIONS] [SNAPSHOT_ID] ``` - - - After the update, the snapshot is hot-swapped. - - ??? info "Options" ```text @@ -125,16 +104,11 @@ After the update, the snapshot is hot-swapped. Inspect the metadata of a graph insight snapshot. ```shell-session title="Usage" -$ cmemc graph insights inspect [OPTIONS] SNAPSHOT_ID +cmemc graph insights inspect [OPTIONS] SNAPSHOT_ID ``` - - - - ??? info "Options" ```text --raw Outputs raw JSON. 
``` - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/graph/validation/index.md b/docs/automate/cmemc-command-line-interface/command-reference/graph/validation/index.md index 2932c48b8..f9d5528d1 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/graph/validation/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/graph/validation/index.md @@ -17,23 +17,16 @@ This command group is dedicated to the management of resource validation process !!! note Validation processes are identified with a random ID and can be listed with the `graph validation list` command. To start or cancel validation processes, use the `graph validation execute` and `graph validation cancel` command. To inspect the found violations of a validation process, use the `graph validation inspect` command. - - ## graph validation execute Start a new validation process. ```shell-session title="Usage" -$ cmemc graph validation execute [OPTIONS] IRI +cmemc graph validation execute [OPTIONS] IRI ``` - - - Validation is performed on all typed resources of the data / context graph (and its sub-graphs). Each resource is validated against all applicable node shapes from the shape catalog. - - ??? info "Options" ```text @@ -75,20 +68,14 @@ Validation is performed on all typed resources of the data / context graph (and List running and finished validation processes. ```shell-session title="Usage" -$ cmemc graph validation list [OPTIONS] +cmemc graph validation list [OPTIONS] ``` - - - This command provides a filterable table or identifier list of validation processes. The command operates on the process summary and provides some statistics. !!! note Detailed information on the found violations can be listed with the `graph validation inspect` command. - - - ??? info "Options" ```text @@ -106,12 +93,9 @@ This command provides a filterable table or identifier list of validation proces List and inspect errors found with a validation process. 
```shell-session title="Usage" -$ cmemc graph validation inspect [OPTIONS] PROCESS_ID +cmemc graph validation inspect [OPTIONS] PROCESS_ID ``` - - - This command provides detailed information on the found violations of a validation process. Use the ``--filter`` option to limit the output based on different criteria such as constraint name (`constraint`), origin node shape of the rule (`node-shape`), or the validated resource (`resource`). @@ -119,9 +103,6 @@ Use the ``--filter`` option to limit the output based on different criteria such !!! note Validation processes IDs can be listed with the `graph validation list` command, or by utilizing the tab completion of this command. - - - ??? info "Options" ```text @@ -140,29 +121,20 @@ Use the ``--filter`` option to limit the output based on different criteria such Cancel a running validation process. ```shell-session title="Usage" -$ cmemc graph validation cancel PROCESS_ID +cmemc graph validation cancel PROCESS_ID ``` - - - !!! note In order to get the process IDs of all currently running validation processes, use the `graph validation list` command with the option `--filter status running`, or utilize the tab completion of this command. - - - ## graph validation export Export a report of finished validations. ```shell-session title="Usage" -$ cmemc graph validation export [OPTIONS] [PROCESS_IDS]... +cmemc graph validation export [OPTIONS] [PROCESS_IDS]... ``` - - - This command exports a jUnit XML or JSON report in order to process them somewhere else (e.g. a CI pipeline). You can export a single report of multiple validation processes. @@ -172,9 +144,6 @@ For jUnit XML: Each validation process result will be transformed to a single te !!! note Validation processes IDs can be listed with the `graph validation list` command, or by utilizing the tab completion of this command. - - - ??? 
info "Options" ```text @@ -188,4 +157,3 @@ For jUnit XML: Each validation process result will be transformed to a single te --format [JSON|XML] Export either the plain JSON report or a distilled jUnit XML report. [default: XML] ``` - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/index.md b/docs/automate/cmemc-command-line-interface/command-reference/index.md index deb7a5a00..0026c6448 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/index.md @@ -132,4 +132,3 @@ tags: | [workflow scheduler](workflow/scheduler/index.md) | [inspect](workflow/scheduler/index.md#workflow-scheduler-inspect) | Display all metadata of a scheduler. | | [workflow scheduler](workflow/scheduler/index.md) | [disable](workflow/scheduler/index.md#workflow-scheduler-disable) | Disable scheduler(s). | | [workflow scheduler](workflow/scheduler/index.md) | [enable](workflow/scheduler/index.md#workflow-scheduler-enable) | Enable scheduler(s). | - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/project/file/index.md b/docs/automate/cmemc-command-line-interface/command-reference/project/file/index.md index 66577b561..be1ed4a6d 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/project/file/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/project/file/index.md @@ -16,23 +16,16 @@ File resources are identified with a `RESOURCE_ID` which is a concatenation of i !!! note To get a list of existing file resources, execute the `project file list` command or use tab-completion. - - ## project file list List available file resources. ```shell-session title="Usage" -$ cmemc project file list [OPTIONS] +cmemc project file list [OPTIONS] ``` - - - Outputs a table or a list of file resources. - - ??? info "Options" ```text @@ -50,16 +43,11 @@ Outputs a table or a list of file resources. Delete file resources. 
```shell-session title="Usage" -$ cmemc project file delete [OPTIONS] [RESOURCE_IDS]... +cmemc project file delete [OPTIONS] [RESOURCE_IDS]... ``` - - - There are three selection mechanisms: with specific IDs - only those specified resources will be deleted; by using `--filter` - resources based on the filter type and value will be deleted; by using `--all`, which will delete all resources. - - ??? info "Options" ```text @@ -77,28 +65,21 @@ There are three selection mechanisms: with specific IDs - only those specified r Download file resources to the local file system. ```shell-session title="Usage" -$ cmemc project file download [OPTIONS] [RESOURCE_IDS]... +cmemc project file download [OPTIONS] [RESOURCE_IDS]... ``` - - - This command downloads one or more file resources from projects to your local file system. Files are saved with their resource names in the output directory. Resources are identified by their IDs in the format `PROJECT_ID`:`RESOURCE_NAME`. ```shell-session title="Example" -$ cmemc project file download my-proj:my-file.csv +cmemc project file download my-proj:my-file.csv ``` - ```shell-session title="Example" -$ cmemc project file download my-proj:file1.csv my-proj:file2.csv --output-dir /tmp +cmemc project file download my-proj:file1.csv my-proj:file2.csv --output-dir /tmp ``` - - - ??? info "Options" ```text @@ -114,25 +95,18 @@ $ cmemc project file download my-proj:file1.csv my-proj:file2.csv --output-dir / Upload a file to a project. ```shell-session title="Usage" -$ cmemc project file upload [OPTIONS] INPUT_PATH +cmemc project file upload [OPTIONS] INPUT_PATH ``` - - - This command uploads a file to a project as a file resource. !!! note If you want to create a dataset from your file, the `dataset create` command is maybe the better option. - ```shell-session title="Example" -$ cmemc project file upload my-file.csv --project my-project +cmemc project file upload my-file.csv --project my-project ``` - - - ??? 
info "Options" ```text @@ -151,13 +125,9 @@ $ cmemc project file upload my-file.csv --project my-project Display all metadata of a file resource. ```shell-session title="Usage" -$ cmemc project file inspect [OPTIONS] RESOURCE_ID +cmemc project file inspect [OPTIONS] RESOURCE_ID ``` - - - - ??? info "Options" ```text @@ -169,16 +139,11 @@ $ cmemc project file inspect [OPTIONS] RESOURCE_ID Display all usage data of a file resource. ```shell-session title="Usage" -$ cmemc project file usage [OPTIONS] RESOURCE_ID +cmemc project file usage [OPTIONS] RESOURCE_ID ``` - - - - ??? info "Options" ```text --raw Outputs raw JSON. ``` - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/project/index.md b/docs/automate/cmemc-command-line-interface/command-reference/project/index.md index e945e7f56..10a636684 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/project/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/project/index.md @@ -16,40 +16,28 @@ Projects are identified by a `PROJECT_ID`. !!! note To get a list of existing projects, execute the `project list` command or use tab-completion. - - ## project open Open projects in the browser. ```shell-session title="Usage" -$ cmemc project open PROJECT_IDS... +cmemc project open PROJECT_IDS... ``` - - - With this command, you can open a project in the workspace in your browser to change them. The command accepts multiple project IDs which results in opening multiple browser tabs. - - ## project list List available projects. ```shell-session title="Usage" -$ cmemc project list [OPTIONS] +cmemc project list [OPTIONS] ``` - - - Outputs a list of project IDs which can be used as reference for the project create, delete, export and import commands. - - ??? info "Options" ```text @@ -63,27 +51,20 @@ Outputs a list of project IDs which can be used as reference for the project cre Export projects to files. 
```shell-session title="Usage" -$ cmemc project export [OPTIONS] [PROJECT_IDS]... +cmemc project export [OPTIONS] [PROJECT_IDS]... ``` - - - Projects can be exported with different export formats. The default type is a zip archive which includes metadata as well as dataset resources. If more than one project is exported, a file is created for each project. By default, these files are created in the current directory with a descriptive name (see `--template` option default). !!! note Projects can be listed by using the `project list` command. - You can use the template string to create subdirectories. ```shell-session title="Example" -$ cmemc config list | parallel -I% cmemc -c % project export --all -t "dump/{{connection}}/{{date}}-{{id}}.project" +cmemc config list | parallel -I% cmemc -c % project export --all -t "dump/{{connection}}/{{date}}-{{id}}.project" ``` - - - ??? info "Options" ```text @@ -119,19 +100,13 @@ $ cmemc config list | parallel -I% cmemc -c % project export --all -t "dump/{{co Import a project from a file or directory. ```shell-session title="Usage" -$ cmemc project import [OPTIONS] PATH [PROJECT_ID] +cmemc project import [OPTIONS] PATH [PROJECT_ID] ``` - - - ```shell-session title="Example" -$ cmemc project import my_project.zip my_project +cmemc project import my_project.zip my_project ``` - - - ??? info "Options" ```text @@ -144,24 +119,17 @@ $ cmemc project import my_project.zip my_project Delete projects. ```shell-session title="Usage" -$ cmemc project delete [OPTIONS] [PROJECT_IDS]... +cmemc project delete [OPTIONS] [PROJECT_IDS]... ``` - - - This command deletes existing data integration projects from Corporate Memory. !!! warning Projects will be deleted without prompting! - !!! note Projects can be listed with the `project list` command. - - - ??? info "Options" ```text @@ -174,20 +142,14 @@ This command deletes existing data integration projects from Corporate Memory. Create projects. 
```shell-session title="Usage" -$ cmemc project create [OPTIONS] PROJECT_IDS... +cmemc project create [OPTIONS] PROJECT_IDS... ``` - - - This command creates one or more new projects. Existing projects will not be overwritten. !!! note Projects can be listed by using the `project list` command. - - - ??? info "Options" ```text @@ -209,27 +171,19 @@ This command creates one or more new projects. Existing projects will not be ove Reload projects from the workspace provider. ```shell-session title="Usage" -$ cmemc project reload [OPTIONS] [PROJECT_IDS]... +cmemc project reload [OPTIONS] [PROJECT_IDS]... ``` - - - This command reloads all tasks of a project from the workspace provider. This is similar to the `workspace reload` command, but for a single project only. !!! note You need this in case you changed project data externally or loaded a project which uses plugins which are not installed yet. In this case, install the plugin(s) and reload the project afterward. - !!! warning Depending on the size your datasets esp. your Knowledge Graphs, reloading a project can take a long time to re-create the path caches. - - - ??? info "Options" ```text -a, --all Reload all projects ``` - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/project/variable/index.md b/docs/automate/cmemc-command-line-interface/command-reference/project/variable/index.md index 08369a03b..8e6e29fd0 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/project/variable/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/project/variable/index.md @@ -15,22 +15,16 @@ Project variables can be used in dataset and task parameters, and in the templat Variables are identified by a `VARIABLE_ID`. To get a list of existing variables, execute the list command or use tab-completion. The `VARIABLE_ID` is a concatenation of a `PROJECT_ID` and a `VARIABLE_NAME`, such as `my-project:my-variable`. 
- ## project variable list List available project variables. ```shell-session title="Usage" -$ cmemc project variable list [OPTIONS] +cmemc project variable list [OPTIONS] ``` - - - Outputs a table or a list of project variables. - - ??? info "Options" ```text @@ -49,20 +43,14 @@ Outputs a table or a list of project variables. Get the value or other data of a project variable. ```shell-session title="Usage" -$ cmemc project variable get [OPTIONS] VARIABLE_ID +cmemc project variable get [OPTIONS] VARIABLE_ID ``` - - - Use the ``--key`` option to specify which information you want to get. !!! note Only the `value` key is always available on a project variable. Static value variables have no `template` key, and the `description` key is optional for both types of variables. - - - ??? info "Options" ```text @@ -77,42 +65,29 @@ Use the ``--key`` option to specify which information you want to get. Delete a project variable. ```shell-session title="Usage" -$ cmemc project variable delete VARIABLE_ID +cmemc project variable delete VARIABLE_ID ``` - - - !!! note You can not delete a variable which is used by another (template based) variable. In order to do so, delete the template based variable first. - - - ## project variable create Create a new project variable. ```shell-session title="Usage" -$ cmemc project variable create [OPTIONS] VARIABLE_NAME +cmemc project variable create [OPTIONS] VARIABLE_NAME ``` - - - Variables need to be created with a value or a template (not both). In addition to that, a project ID and a name are mandatory. ```shell-session title="Example" -$ cmemc project variable create my_var --project my_project --value abc +cmemc project variable create my_var --project my_project --value abc ``` - !!! note cmemc is currently not able to manage the order of the variables in a project. This means you have to create plain value variables in advance, before you can create template based variables, which access these values. - - - ??? 
info "Options" ```text @@ -132,20 +107,14 @@ $ cmemc project variable create my_var --project my_project --value abc Update data of an existing project variable. ```shell-session title="Usage" -$ cmemc project variable update [OPTIONS] VARIABLE_ID +cmemc project variable update [OPTIONS] VARIABLE_ID ``` - - - With this command you can update the value or the template, as well as the description of a project variable. !!! note If you update the template of a static variable, it will be transformed to a template based variable. If you want to change the value of a template based variable, an error will be shown. - - - ??? info "Options" ```text @@ -156,4 +125,3 @@ With this command you can update the value or the template, as well as the descr accessing variables from the same project. --description TEXT The new description of the project variable. ``` - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/query/index.md b/docs/automate/cmemc-command-line-interface/command-reference/query/index.md index 8d181324d..27246cae7 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/query/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/query/index.md @@ -20,27 +20,20 @@ Queries can use a mustache like syntax to specify placeholder for parameter valu !!! note In order to get a list of queries from the query catalog, execute the `query list` command or use tab-completion. - - ## query execute Execute queries which are loaded from files or the query catalog. ```shell-session title="Usage" -$ cmemc query execute [OPTIONS] QUERIES... +cmemc query execute [OPTIONS] QUERIES... ``` - - - Queries are identified either by a file path, a URI from the query catalog, or a shortened URI (qname, using a default namespace). If multiple queries are executed one after the other, the first failing query stops the whole execution chain. Limitations: All optional parameters (e.g. accept, base64, ...) 
are provided for ALL queries in an execution chain. If you need different parameters for each query in a chain, run cmemc multiple times and use the logical operators && and || of your shell instead. - - ??? info "Options" ```text @@ -84,16 +77,11 @@ Limitations: All optional parameters (e.g. accept, base64, ...) are provided for List available queries from the catalog. ```shell-session title="Usage" -$ cmemc query list [OPTIONS] +cmemc query list [OPTIONS] ``` - - - Outputs a list of query URIs which can be used as reference for the query execute command. - - ??? info "Options" ```text @@ -109,18 +97,13 @@ Outputs a list of query URIs which can be used as reference for the query execut Open queries in the editor of the query catalog in your browser. ```shell-session title="Usage" -$ cmemc query open [OPTIONS] QUERIES... +cmemc query open [OPTIONS] QUERIES... ``` - - - With this command, you can open (remote) queries from the query catalog in the query editor in your browser (e.g. in order to change them). You can also load local query files into the query editor, in order to import them into the query catalog. The command accepts multiple query URIs or files which results in opening multiple browser tabs. - - ??? info "Options" ```text @@ -133,18 +116,13 @@ The command accepts multiple query URIs or files which results in opening multip Get status information of executed and running queries. ```shell-session title="Usage" -$ cmemc query status [OPTIONS] [QUERY_ID] +cmemc query status [OPTIONS] [QUERY_ID] ``` - - - With this command, you can access the latest executed SPARQL queries on the Explore backend (DataPlatform). These queries are identified by UUIDs and listed ordered by starting timestamp. You can filter queries based on status and runtime in order to investigate slow queries. In addition to that, you can get the details of a specific query by using the ID as a parameter. - - ??? 
info "Options" ```text @@ -162,27 +140,21 @@ You can filter queries based on status and runtime in order to investigate slow Re-execute queries from a replay file. ```shell-session title="Usage" -$ cmemc query replay [OPTIONS] REPLAY_FILE +cmemc query replay [OPTIONS] REPLAY_FILE ``` - - - This command reads a `REPLAY_FILE` and re-executes the logged queries. A `REPLAY_FILE` is a JSON document which is an array of JSON objects with at least a key `queryString` holding the query text OR a key `iri` holding the IRI of the query in the query catalog. It can be created with the `query status` command. ```shell-session title="Example" -$ query status --raw > replay.json +query status --raw > replay.json ``` - The output of this command shows basic query execution statistics. The queries are executed one after another in the order given in the input `REPLAY_FILE`. Query placeholders / parameters are ignored. If a query results in an error, the duration is not counted. The optional output file is the same JSON document which is used as input, but each query object is annotated with an additional `replays` object, which is an array of JSON objects which hold values for the replay|loop|run IDs, start and end time as well as duration and other data. - - ??? info "Options" ```text @@ -203,13 +175,7 @@ The optional output file is the same JSON document which is used as input, but e Cancel a running query. ```shell-session title="Usage" -$ cmemc query cancel QUERY_ID +cmemc query cancel QUERY_ID ``` - - - With this command, you can cancel a running query. 
Depending on the backend triple store, this will result in a broken result stream (stardog, neptune and virtuoso) or a valid result stream with incomplete results (graphdb) - - - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/vocabulary/cache/index.md b/docs/automate/cmemc-command-line-interface/command-reference/vocabulary/cache/index.md index fa503b08d..c87bd4c99 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/vocabulary/cache/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/vocabulary/cache/index.md @@ -11,19 +11,14 @@ tags: List und update the vocabulary cache. - ## vocabulary cache update Reload / updates the data integration cache for a vocabulary. ```shell-session title="Usage" -$ cmemc vocabulary cache update [OPTIONS] [IRIS]... +cmemc vocabulary cache update [OPTIONS] [IRIS]... ``` - - - - ??? info "Options" ```text @@ -35,13 +30,9 @@ $ cmemc vocabulary cache update [OPTIONS] [IRIS]... Output the content of the global vocabulary cache. ```shell-session title="Usage" -$ cmemc vocabulary cache list [OPTIONS] +cmemc vocabulary cache list [OPTIONS] ``` - - - - ??? info "Options" ```text @@ -50,4 +41,3 @@ $ cmemc vocabulary cache list [OPTIONS] cmemc commands. --raw Outputs raw JSON. ``` - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/vocabulary/index.md b/docs/automate/cmemc-command-line-interface/command-reference/vocabulary/index.md index e09e522e5..e874df055 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/vocabulary/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/vocabulary/index.md @@ -11,37 +11,26 @@ tags: List, (un-)install, import or open vocabs / manage cache. - ## vocabulary open Open / explore a vocabulary graph in the browser. ```shell-session title="Usage" -$ cmemc vocabulary open IRI +cmemc vocabulary open IRI ``` - - - Vocabularies are identified by their graph IRI. 
Installed vocabularies can be listed with the `vocabulary list` command. - - ## vocabulary list Output a list of vocabularies. ```shell-session title="Usage" -$ cmemc vocabulary list [OPTIONS] +cmemc vocabulary list [OPTIONS] ``` - - - Vocabularies are graphs (see `graph` command group) which consists of class and property descriptions. - - ??? info "Options" ```text @@ -60,16 +49,11 @@ Vocabularies are graphs (see `graph` command group) which consists of class and Install one or more vocabularies from the catalog. ```shell-session title="Usage" -$ cmemc vocabulary install [OPTIONS] [IRIS]... +cmemc vocabulary install [OPTIONS] [IRIS]... ``` - - - Vocabularies are identified by their graph IRI. Installable vocabularies can be listed with the vocabulary list command. - - ??? info "Options" ```text @@ -82,16 +66,11 @@ Vocabularies are identified by their graph IRI. Installable vocabularies can be Uninstall one or more vocabularies. ```shell-session title="Usage" -$ cmemc vocabulary uninstall [OPTIONS] [IRIS]... +cmemc vocabulary uninstall [OPTIONS] [IRIS]... ``` - - - Vocabularies are identified by their graph IRI. Already installed vocabularies can be listed with the vocabulary list command. - - ??? info "Options" ```text @@ -103,18 +82,13 @@ Vocabularies are identified by their graph IRI. Already installed vocabularies c Import a turtle file as a vocabulary. ```shell-session title="Usage" -$ cmemc vocabulary import [OPTIONS] FILE +cmemc vocabulary import [OPTIONS] FILE ``` - - - With this command, you can import a local ontology file as a named graph and create a corresponding vocabulary catalog entry. The uploaded ontology file is analysed locally in order to discover the named graph and the prefix declaration. This requires an OWL ontology description which correctly uses the `vann:preferredNamespacePrefix` and `vann:preferredNamespaceUri` properties. - - ??? 
info "Options" ```text @@ -126,4 +100,3 @@ The uploaded ontology file is analysed locally in order to discover the named gr --replace Replace (overwrite) existing vocabulary, if present. ``` - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/workflow/index.md b/docs/automate/cmemc-command-line-interface/command-reference/workflow/index.md index 139d7d6ab..312165d79 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/workflow/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/workflow/index.md @@ -13,26 +13,20 @@ List, execute, status or open (io) workflows. Workflows are identified by a `WORKFLOW_ID`. The get a list of existing workflows, execute the list command or use tab-completion. The `WORKFLOW_ID` is a concatenation of a `PROJECT_ID` and a `TASK_ID`, such as `my-project:my-workflow`. - ## workflow execute Execute workflow(s). ```shell-session title="Usage" -$ cmemc workflow execute [OPTIONS] [WORKFLOW_IDS]... +cmemc workflow execute [OPTIONS] [WORKFLOW_IDS]... ``` - - - With this command, you can start one or more workflows at the same time or in a sequence, depending on the result of the predecessor. Executing a workflow can be done in two ways: Without `--wait` just sends the starting signal and does not look for the workflow and its result (fire and forget). Starting workflows in this way, starts all given workflows at the same time. The optional `--wait` option starts the workflows in the same way, but also polls the status of a workflow until it is finished. In case of an error of a workflow, the next workflow is not started. - - ??? info "Options" ```text @@ -52,20 +46,14 @@ The optional `--wait` option starts the workflows in the same way, but also poll Execute a workflow with file input/output. 
```shell-session title="Usage" -$ cmemc workflow io [OPTIONS] WORKFLOW_ID +cmemc workflow io [OPTIONS] WORKFLOW_ID ``` - - - With this command, you can execute a workflow that uses replaceable datasets as input, output or for configuration. Use the input parameter to feed data into the workflow. Likewise, use output for retrieval of the workflow result. Workflows without a replaceable dataset will throw an error. !!! note Regarding the input dataset configuration - the following rules apply: If autoconfig is enabled ('--autoconfig', the default), the dataset configuration is guessed. If autoconfig is disabled ('--no-autoconfig') and the type of the dataset file is the same as the replaceable dataset in the workflow, the configuration from this dataset is copied. If autoconfig is disabled and the type of the dataset file is different from the replaceable dataset in the workflow, the default config is used. - - - ??? info "Options" ```text @@ -101,13 +89,9 @@ With this command, you can execute a workflow that uses replaceable datasets as List available workflow. ```shell-session title="Usage" -$ cmemc workflow list [OPTIONS] +cmemc workflow list [OPTIONS] ``` - - - - ??? info "Options" ```text @@ -127,13 +111,9 @@ $ cmemc workflow list [OPTIONS] Get status information of workflow(s). ```shell-session title="Usage" -$ cmemc workflow status [OPTIONS] [WORKFLOW_IDS]... +cmemc workflow status [OPTIONS] [WORKFLOW_IDS]... ``` - - - - ??? info "Options" ```text @@ -150,10 +130,5 @@ $ cmemc workflow status [OPTIONS] [WORKFLOW_IDS]... Open a workflow in your browser. 
```shell-session title="Usage" -$ cmemc workflow open WORKFLOW_ID +cmemc workflow open WORKFLOW_ID ``` - - - - - diff --git a/docs/automate/cmemc-command-line-interface/command-reference/workflow/scheduler/index.md b/docs/automate/cmemc-command-line-interface/command-reference/workflow/scheduler/index.md index f868a14fe..f18beaaa0 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/workflow/scheduler/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/workflow/scheduler/index.md @@ -13,24 +13,18 @@ List, inspect, enable/disable or open scheduler. Schedulers execute workflows in specified intervals. They are identified with a `SCHEDULER_ID`. To get a list of existing schedulers, execute the list command or use tab-completion. - ## workflow scheduler open Open scheduler(s) in the browser. ```shell-session title="Usage" -$ cmemc workflow scheduler open [OPTIONS] SCHEDULER_IDS... +cmemc workflow scheduler open [OPTIONS] SCHEDULER_IDS... ``` - - - With this command, you can open a scheduler in the workspace in your browser to change it. The command accepts multiple scheduler IDs which results in opening multiple browser tabs. - - ??? info "Options" ```text @@ -43,16 +37,11 @@ The command accepts multiple scheduler IDs which results in opening multiple bro List available scheduler. ```shell-session title="Usage" -$ cmemc workflow scheduler list [OPTIONS] +cmemc workflow scheduler list [OPTIONS] ``` - - - Outputs a table or a list of scheduler IDs which can be used as reference for the scheduler commands. - - ??? info "Options" ```text @@ -66,13 +55,9 @@ Outputs a table or a list of scheduler IDs which can be used as reference for th Display all metadata of a scheduler. ```shell-session title="Usage" -$ cmemc workflow scheduler inspect [OPTIONS] SCHEDULER_ID +cmemc workflow scheduler inspect [OPTIONS] SCHEDULER_ID ``` - - - - ??? 
info "Options" ```text @@ -84,16 +69,11 @@ $ cmemc workflow scheduler inspect [OPTIONS] SCHEDULER_ID Disable scheduler(s). ```shell-session title="Usage" -$ cmemc workflow scheduler disable [OPTIONS] [SCHEDULER_IDS]... +cmemc workflow scheduler disable [OPTIONS] [SCHEDULER_IDS]... ``` - - - The command accepts multiple scheduler IDs which results in disabling them one after the other. - - ??? info "Options" ```text @@ -105,19 +85,13 @@ The command accepts multiple scheduler IDs which results in disabling them one a Enable scheduler(s). ```shell-session title="Usage" -$ cmemc workflow scheduler enable [OPTIONS] [SCHEDULER_IDS]... +cmemc workflow scheduler enable [OPTIONS] [SCHEDULER_IDS]... ``` - - - The command accepts multiple scheduler IDs which results in enabling them one after the other. - - ??? info "Options" ```text -a, --all Enable all scheduler. ``` - diff --git a/docs/automate/cmemc-command-line-interface/configuration/certificate-handling-and-ssl-verification/index.md b/docs/automate/cmemc-command-line-interface/configuration/certificate-handling-and-ssl-verification/index.md index cc2a3becf..ae361974b 100644 --- a/docs/automate/cmemc-command-line-interface/configuration/certificate-handling-and-ssl-verification/index.md +++ b/docs/automate/cmemc-command-line-interface/configuration/certificate-handling-and-ssl-verification/index.md @@ -64,9 +64,9 @@ miGId7jMXd24bpfYZSiniC0+SHiCwEmzN818Ss9aIMChymAnV3RRB/UqKLlOMnA= You can also disable SSL Verification completely by setting the `SSL_VERIFY` key in the config or environment to `false`. However, this will lead to warnings: + ``` shell-session $ cmemc -c ssltest.eccenca.com graph list SSL verification is disabled (SSL_VERIFY=False). ... 
``` - diff --git a/docs/automate/cmemc-command-line-interface/configuration/completion-setup/index.md b/docs/automate/cmemc-command-line-interface/configuration/completion-setup/index.md index 155655373..127f566f5 100644 --- a/docs/automate/cmemc-command-line-interface/configuration/completion-setup/index.md +++ b/docs/automate/cmemc-command-line-interface/configuration/completion-setup/index.md @@ -26,25 +26,22 @@ We suggest using [zsh](https://en.wikipedia.org/wiki/Z_shell) so you can take ad Use the following lines for the completion setup of cmemc >= 23.3. If using an older version, look at the [old documenation](https://documentation.eccenca.com/23.1/automate/cmemc-command-line-interface/configuration/completion-setup/). - In order to enable tab completion with **zsh** run the following command: ``` shell-session title="completion setup for zsh" -$ eval "$(_CMEMC_COMPLETE=zsh_source cmemc)" +eval "$(_CMEMC_COMPLETE=zsh_source cmemc)" ``` To enable the interactive menu as seen above in **zsh** run the following command: ``` shell-session title="interactive menu for zsh" -$ zstyle ':completion:*' menu select +zstyle ':completion:*' menu select ``` In order to enable tab completion with **bash** run the following command: ``` shell-session title="completion setup for bash" -$ eval "$(_CMEMC_COMPLETE=bash_source cmemc)" +eval "$(_CMEMC_COMPLETE=bash_source cmemc)" ``` You may want to add this line to your `.bashrc` or `.zshrc`. 
- - diff --git a/docs/automate/cmemc-command-line-interface/configuration/environment-based-configuration/index.md b/docs/automate/cmemc-command-line-interface/configuration/environment-based-configuration/index.md index 4d1fa12d5..b70ee965c 100644 --- a/docs/automate/cmemc-command-line-interface/configuration/environment-based-configuration/index.md +++ b/docs/automate/cmemc-command-line-interface/configuration/environment-based-configuration/index.md @@ -27,10 +27,10 @@ For these variables the rules are simple: You can use any variable from the [con The following commands provide the same result as given in the [basic example for a config file](../file-based-configuration/index.md): ``` shell-session -$ export CMEM_BASE_URI=http://localhost/ -$ export OAUTH_GRANT_TYPE=client_credentials -$ export OAUTH_CLIENT_ID=cmem-service-account -$ export OAUTH_CLIENT_SECRET=... +export CMEM_BASE_URI=http://localhost/ +export OAUTH_GRANT_TYPE=client_credentials +export OAUTH_CLIENT_ID=cmem-service-account +export OAUTH_CLIENT_SECRET=... ``` !!! info @@ -63,8 +63,8 @@ $ cmemc --config-file cmemc.ini --connection mycmem graph list --raw As a next step, we replace all connection parameters with environment variables: ``` shell-session -$ export CMEMC_CONFIG_FILE=cmemc.ini -$ export CMEMC_CONNECTION=mycmem +export CMEMC_CONFIG_FILE=cmemc.ini +export CMEMC_CONNECTION=mycmem ``` This alone allows us to save a lot of typing for a series of commands on the same Corporate Memory instance. @@ -77,7 +77,7 @@ $ cmemc graph list --raw However, you can also pre-define command options in the same way: ``` shell-session -$ export CMEMC_GRAPH_LIST_RAW=true +export CMEMC_GRAPH_LIST_RAW=true ``` Again, the same command but `--raw` is set per default. 
@@ -92,7 +92,7 @@ $ cmemc graph list Since there is a top level `--debug` option, the corresponding variable name is `CMEMC_DEBUG`: ``` shell-session -$ export CMEMC_DEBUG=true +export CMEMC_DEBUG=true ``` ## Configuration environment export from the config file @@ -118,12 +118,12 @@ export SSL_VERIFY="True" This can be used to export a full `config.env` or to `eval` it in an environment for other processes: ``` shell-session -$ cmemc -c my-cmem.example.org config eval > config.env -$ eval $(cmemc -c my-cmem.example.org config eval) +cmemc -c my-cmem.example.org config eval > config.env +eval $(cmemc -c my-cmem.example.org config eval) ``` Please note that the following command has the same effect but needs the `cmemc.ini` for evaluating the `config` values for the config section `my-cmem.example.org`: ``` shell-session -$ export CMEMC_CONNECTION="my-cmem.example.org" +export CMEMC_CONNECTION="my-cmem.example.org" ``` diff --git a/docs/automate/cmemc-command-line-interface/configuration/file-based-configuration/index.md b/docs/automate/cmemc-command-line-interface/configuration/file-based-configuration/index.md index a152b4575..6f4eb5339 100644 --- a/docs/automate/cmemc-command-line-interface/configuration/file-based-configuration/index.md +++ b/docs/automate/cmemc-command-line-interface/configuration/file-based-configuration/index.md @@ -26,7 +26,6 @@ If you need to change this location and want to use another config file, you hav However, once you start cmemc the first time without any command or option, it will create an empty configuration file at this location and will output a general introduction. - ??? example "First cmemc run ..." ``` shell-session $ cmemc @@ -270,4 +269,3 @@ Setting this to a PEM file allows for using private Certificate Authorities for Please refer to [Certificate handling and SSL verification](../certificate-handling-and-ssl-verification/index.md) for more information. 
This variable defaults to `$PYTHON_HOME/site-packages/certifi/cacert.pem`. - diff --git a/docs/automate/cmemc-command-line-interface/configuration/getting-credentials-from-external-processes/index.md b/docs/automate/cmemc-command-line-interface/configuration/getting-credentials-from-external-processes/index.md index 6ae2a4ccf..1995b8b91 100644 --- a/docs/automate/cmemc-command-line-interface/configuration/getting-credentials-from-external-processes/index.md +++ b/docs/automate/cmemc-command-line-interface/configuration/getting-credentials-from-external-processes/index.md @@ -20,11 +20,11 @@ As described in the [Configuration with Environment Variables](../environment-ba The following code snippet demonstrates the behaviour: ``` shell-session -$ export CMEM_BASE_URI="https://your-cmem.eccenca.dev/" -$ export OAUTH_GRANT_TYPE="client_credentials" -$ export OAUTH_CLIENT_ID="cmem-service-account" -$ export OAUTH_CLIENT_SECRET="...secret..." -$ cmemc graph list +export CMEM_BASE_URI="https://your-cmem.eccenca.dev/" +export OAUTH_GRANT_TYPE="client_credentials" +export OAUTH_CLIENT_ID="cmem-service-account" +export OAUTH_CLIENT_SECRET="...secret..." 
+cmemc graph list ``` In the context of a CI/CD pipeline, e.g., on github, these credentials can be taken from the repository secrets: @@ -48,7 +48,7 @@ jobs: In shell context, you can fetch the secret from an external process to the variable: ``` shell-session -$ export OAUTH_CLIENT_SECRET=$(get-my-secret.sh) +export OAUTH_CLIENT_SECRET=$(get-my-secret.sh) ``` ## External Processes @@ -116,4 +116,3 @@ if [ "${OAUTH_GRANT_TYPE}" = "password" ]; then fi exit 1 ``` - diff --git a/docs/automate/cmemc-command-line-interface/configuration/index.md b/docs/automate/cmemc-command-line-interface/configuration/index.md index b6b25c395..8dce0e911 100644 --- a/docs/automate/cmemc-command-line-interface/configuration/index.md +++ b/docs/automate/cmemc-command-line-interface/configuration/index.md @@ -10,28 +10,27 @@ hide: In order to work with cmemc, you have to configure it according to your needs. -
-- :material-file-cog-outline: File-based Configuration +- :material-file-cog-outline: File-based Configuration --- The most common way to configure cmemc is with a central [configuration file](file-based-configuration/index.md). -- :material-cog-outline: Environment-based Configuration +- :material-cog-outline: Environment-based Configuration --- In addition to configuration files, cmemc can be widely configured and parameterized with [environment variables](environment-based-configuration/index.md). -- :material-rocket-launch: Completion Setup +- :material-rocket-launch: Completion Setup --- Setting up [command completion](completion-setup/index.md) is optional but highly recommended and will greatly speed up your cmemc terminal sessions. -- :material-key-link: Security Considerations +- :material-key-link: Security Considerations --- diff --git a/docs/automate/cmemc-command-line-interface/index.md b/docs/automate/cmemc-command-line-interface/index.md index f54bb65f1..b39468a9a 100644 --- a/docs/automate/cmemc-command-line-interface/index.md +++ b/docs/automate/cmemc-command-line-interface/index.md @@ -13,7 +13,7 @@ tags:
-- :octicons-terminal-16: **Command Line** interface for **eccenca Corporate Memory** +- :octicons-terminal-16: **Command Line** interface for **eccenca Corporate Memory** --- @@ -33,7 +33,7 @@ tags: [![pypy downloads](https://img.shields.io/pypi/dm/cmem-cmemc.svg "pypy downloads"){ .off-glb }](https://pypi.python.org/pypi/cmem-cmemc/) [![Docker Image](https://img.shields.io/badge/docker-image-blue?logo=docker&logoColor=white "Docker Image"){ .off-glb }](./invocation/docker-image/index.md) -- :octicons-people-24: Intended for **Administrators** and **Linked Data Expert** +- :octicons-people-24: Intended for **Administrators** and **Linked Data Expert** --- @@ -46,13 +46,12 @@ tags: --filter tag velocity-daily ``` - 1. :person_raising_hand: + 1. :person_raising_hand: - The option `-c` is short for `--connection` and references to a remote Corporate Memory instance. - The `list` command in the `dataset` command group shows all datasets of an instance. - In order to manipulate output dataset list, the `--filter` option takes two parameter, a filter type (`tag`, `project`, ...) and a value. - -- :octicons-rocket-16: Fast ad-hoc Execution with **Command Completion** +- :octicons-rocket-16: Fast ad-hoc Execution with **Command Completion** --- @@ -61,8 +60,7 @@ tags:
Create Build Project and Dataset
- -- :material-feature-search-outline: **Main Features**: +- :material-feature-search-outline: **Main Features**: --- @@ -76,4 +74,3 @@ tags: ```
- diff --git a/docs/automate/cmemc-command-line-interface/installation/index.md b/docs/automate/cmemc-command-line-interface/installation/index.md index d36233d25..51cc27a0a 100644 --- a/docs/automate/cmemc-command-line-interface/installation/index.md +++ b/docs/automate/cmemc-command-line-interface/installation/index.md @@ -13,16 +13,13 @@ cmemc can be installed using the python package from pypi.org, the release packa cmemc is available as an [official pypi package](https://pypi.org/project/cmem-cmemc/) so installation can be done with pip or pipx (preferred): ``` shell-session -$ pipx install cmem-cmemc +pipx install cmem-cmemc ``` - ## ... via docker image This topic is described on a [stand-alone page](../invocation/docker-image/index.md). - !!! Note Once you have installed cmemc, you need to configure a connection with a [config file](../configuration/file-based-configuration/index.md) or learn how to [use environment variables](../configuration/environment-based-configuration/index.md) to control cmemc. - diff --git a/docs/automate/cmemc-command-line-interface/invocation/docker-image/index.md b/docs/automate/cmemc-command-line-interface/invocation/docker-image/index.md index d309bafa7..8a6ae7625 100644 --- a/docs/automate/cmemc-command-line-interface/invocation/docker-image/index.md +++ b/docs/automate/cmemc-command-line-interface/invocation/docker-image/index.md @@ -67,4 +67,3 @@ http://schema.org/,8809 https://vocab.eccenca.com/shacl/,1752 [...] 
``` - diff --git a/docs/automate/cmemc-command-line-interface/invocation/github-action/index.md b/docs/automate/cmemc-command-line-interface/invocation/github-action/index.md index 54c3264f0..0982159a4 100644 --- a/docs/automate/cmemc-command-line-interface/invocation/github-action/index.md +++ b/docs/automate/cmemc-command-line-interface/invocation/github-action/index.md @@ -60,4 +60,3 @@ The Github project [eccenca/cmemc-workflow](https://github.com/eccenca/cmemc-wor Here is an example output: ![Example workflow output](example-workflow-output.png "Example workflow output") - diff --git a/docs/automate/cmemc-command-line-interface/invocation/gitlab-pipeline/index.md b/docs/automate/cmemc-command-line-interface/invocation/gitlab-pipeline/index.md index a40a0199d..cadc5bd90 100644 --- a/docs/automate/cmemc-command-line-interface/invocation/gitlab-pipeline/index.md +++ b/docs/automate/cmemc-command-line-interface/invocation/gitlab-pipeline/index.md @@ -61,4 +61,3 @@ The Github project [eccenca/cmemc-workflow](https://github.com/eccenca/cmemc-wor Here is an example output: ![Example pipeline output](example-pipeline-output.png "Example pipeline output") - diff --git a/docs/automate/cmemc-command-line-interface/invocation/index.md b/docs/automate/cmemc-command-line-interface/invocation/index.md index 418dd4a90..bdf6f2e96 100644 --- a/docs/automate/cmemc-command-line-interface/invocation/index.md +++ b/docs/automate/cmemc-command-line-interface/invocation/index.md @@ -12,12 +12,12 @@ Besides the plain ad-hoc invocation from a users terminal, the following recipes
-- :material-docker: Executing cmemc as a [Docker Container](docker-image/index.md). +- :material-docker: Executing cmemc as a [Docker Container](docker-image/index.md). -- :material-github: Running cmemc jobs as part of [Github Actions](github-action/index.md). +- :material-github: Running cmemc jobs as part of [Github Actions](github-action/index.md). -- :material-gitlab: Running cmemc jobs as part of [Gitlab Pipelines](gitlab-pipeline/index.md). +- :material-gitlab: Running cmemc jobs as part of [Gitlab Pipelines](gitlab-pipeline/index.md). -- :eccenca-application-queries: Preparing [SPARQL Scripts](sparql-scripts/index.md) to fetch data from your Knowledge Graphs. -
+- :eccenca-application-queries: Preparing [SPARQL Scripts](sparql-scripts/index.md) to fetch data from your Knowledge Graphs. +
diff --git a/docs/automate/cmemc-command-line-interface/invocation/sparql-scripts/index.md b/docs/automate/cmemc-command-line-interface/invocation/sparql-scripts/index.md index dc31287cc..f4315b000 100644 --- a/docs/automate/cmemc-command-line-interface/invocation/sparql-scripts/index.md +++ b/docs/automate/cmemc-command-line-interface/invocation/sparql-scripts/index.md @@ -34,7 +34,7 @@ This will set cmemc as an interpreter for the rest of the file, and by using the Now you need to define your SPARQL file as executable and run it: ``` shell-session -$ chmod a+x ./count-graphs.sh +chmod a+x ./count-graphs.sh ``` ``` shell-session @@ -48,4 +48,3 @@ https://ns.eccenca.com/data/queries/,39 https://ns.eccenca.com/data/config/,4 https://ns.eccenca.com/data/userinfo/,4 ``` - diff --git a/docs/automate/cmemc-command-line-interface/troubleshooting-and-caveats/index.md b/docs/automate/cmemc-command-line-interface/troubleshooting-and-caveats/index.md index 4b1037890..75c3f6aa5 100644 --- a/docs/automate/cmemc-command-line-interface/troubleshooting-and-caveats/index.md +++ b/docs/automate/cmemc-command-line-interface/troubleshooting-and-caveats/index.md @@ -36,4 +36,3 @@ This can have multiple reasons - please check in the following order: - `application.yaml` of DataIntegration - reverse proxy configuration - diff --git a/docs/automate/cmemc-command-line-interface/workflow-execution-and-orchestration/index.md b/docs/automate/cmemc-command-line-interface/workflow-execution-and-orchestration/index.md index 95c3a61ec..c271f4118 100644 --- a/docs/automate/cmemc-command-line-interface/workflow-execution-and-orchestration/index.md +++ b/docs/automate/cmemc-command-line-interface/workflow-execution-and-orchestration/index.md @@ -147,4 +147,3 @@ else exit 0 fi ``` - diff --git a/docs/automate/continuous-integration/index.md b/docs/automate/continuous-integration/index.md index ed967e2ca..c7780dc01 100644 --- a/docs/automate/continuous-integration/index.md +++ 
b/docs/automate/continuous-integration/index.md @@ -33,9 +33,8 @@ The following pages provide recipes for different CI/CD solutions:
-- :material-github: [Github Actions](../cmemc-command-line-interface/invocation/github-action/index.md) +- :material-github: [Github Actions](../cmemc-command-line-interface/invocation/github-action/index.md) -- :material-gitlab: [Gitlab Pipelines](../cmemc-command-line-interface/invocation/gitlab-pipeline/index.md) +- :material-gitlab: [Gitlab Pipelines](../cmemc-command-line-interface/invocation/gitlab-pipeline/index.md)
- diff --git a/docs/automate/index.md b/docs/automate/index.md index 48dc8623a..f7ff7c045 100644 --- a/docs/automate/index.md +++ b/docs/automate/index.md @@ -11,29 +11,28 @@ Setup processes and automate activities based on and towards your Knowledge Grap
-- :octicons-terminal-16: [cmemc - Command Line Interface](cmemc-command-line-interface/index.md) +- :octicons-terminal-16: [cmemc - Command Line Interface](cmemc-command-line-interface/index.md) --- cmemc is intended for system administrators and Linked Data experts, who want to automate and control activities on eccenca Corporate Memory remotely. -- :eccenca-artefact-workflow: [Processing data with variable input workflows](processing-data-with-variable-input-workflows/index.md) +- :eccenca-artefact-workflow: [Processing data with variable input workflows](processing-data-with-variable-input-workflows/index.md) --- This tutorial shows how you can create and use data integration workflows to process data coming from outside Corporate Memory (i.e., without registering datasets). -- :material-clock-start: [Scheduling Workflows](scheduling-workflows/index.md) +- :material-clock-start: [Scheduling Workflows](scheduling-workflows/index.md) --- For a time-based execution of a workflow, Corporate Memory provides the Scheduler operator. -- :material-github: [Continuous Integration and Delivery](continuous-integration/index.md) +- :material-github: [Continuous Integration and Delivery](continuous-integration/index.md) --- Setup processes which continuously integrate data artifacts such as vocabularies and shapes with your Corporate Memory instances.
- diff --git a/docs/automate/scheduling-workflows/index.md b/docs/automate/scheduling-workflows/index.md index 52444713d..7c35df02d 100644 --- a/docs/automate/scheduling-workflows/index.md +++ b/docs/automate/scheduling-workflows/index.md @@ -30,7 +30,6 @@ Once you are ready with the configurations, click **Create** button. Now, the sc ![Create a Scheduler](22-1-CreateScheduler.gif "Create a Scheduler") - ## Modify, enable or disable a scheduler 1. Navigate to **Build → Projects** section in the workspace. @@ -62,4 +61,3 @@ More common examples: - `PT30M` - every half hour - `PT1H` - every hour - `P1D` - every day - diff --git a/docs/build/active-learning/index.md b/docs/build/active-learning/index.md index 8a501a24c..a08d62f9e 100644 --- a/docs/build/active-learning/index.md +++ b/docs/build/active-learning/index.md @@ -37,7 +37,7 @@ The examples process below uses the **movies** example project which can be adde ## Creating an automatic link rule -- Choose properties to compare. +- Choose properties to compare. Select from the suggestions or search them by specifying property paths for both entities. ![image](22.2-Suggestion.png){ class="bordered" } @@ -48,15 +48,15 @@ The examples process below uses the **movies** example project which can be adde ## Add property paths for both entities -- Click on the Source path and select a path. +- Click on the Source path and select a path. ![image](22.2-Sourcepath.png){ class="bordered" } -- Click on the Target path and select a corresponding path. +- Click on the Target path and select a corresponding path. ![image](22.2-targetpath.png){ class="bordered" } -- Click on the :eccenca-item-add-artefact: icon to add the path pair to be examined in the learning algorithm. +- Click on the :eccenca-item-add-artefact: icon to add the path pair to be examined in the learning algorithm. 
![image](22.2-plusicon.png){ class="bordered" } @@ -66,11 +66,11 @@ The examples process below uses the **movies** example project which can be adde ![image](22.2-stepresult1.png){ class="bordered" } -- Click on :eccenca-item-remove: icon to remove the paths. +- Click on :eccenca-item-remove: icon to remove the paths. ![image](22.2-delete.png){ class="bordered" } -- Click on Start learning. +- Click on Start learning. ![image](22.2-startlearning.png){ class="bordered" } @@ -105,7 +105,7 @@ The examples process below uses the **movies** example project which can be adde ![image](22.2-decline.png){ class="bordered" } -- On the right side of the page click on the 3 dots, then click on show entity’s URI. +- On the right side of the page click on the 3 dots, then click on show entity’s URI. ![image](22.2-uri.png){ class="bordered" } @@ -115,11 +115,11 @@ The examples process below uses the **movies** example project which can be adde ![image](22.2-reflinks.png){ class="bordered" } -- Click on Save based on our input confirm, uncertain and decline the link rule will get generated automatically and the score changes for these entities in the score bar. +- Click on Save based on our input confirm, uncertain and decline the link rule will get generated automatically and the score changes for these entities in the score bar. ![image](22.2-save.png){ class="bordered" } -- Switch on the save best learned rule, then click on save. +- Switch on the save best learned rule, then click on save. 
![image](22.2-stepresult2.png){ class="bordered" } diff --git a/docs/build/define-prefixes-namespaces/index.md b/docs/build/define-prefixes-namespaces/index.md index aa20eca17..1ccfdcf2b 100644 --- a/docs/build/define-prefixes-namespaces/index.md +++ b/docs/build/define-prefixes-namespaces/index.md @@ -13,8 +13,8 @@ Namespace declarations allow for the abbreviation of IRIs by using a prefixed re For example, after defining a namespace with the values -- **prefix name** = `cohw`, and the -- **namespace IRI** = `https://data.company.org/hardware/` +- **prefix name** = `cohw`, and the +- **namespace IRI** = `https://data.company.org/hardware/` you can use the term `cohw:test` as an abbreviation for the full IRI `https://data.company.org/hardware/test`. @@ -28,8 +28,8 @@ After installing a vocabulary from the [Vocabulary Catalog](../../explore-and-a In order to get the **prefix name** and the **namespace IRI** from the vocabulary graph, the following terms from the [VANN vocabulary](https://vocab.org/vann/) need to be used on the Ontology resource. 
-- [vann:preferredNamespacePrefix](https://vocab.org/vann/#preferredNamespacePrefix) - to specify the **prefix name** -- [vann:preferredNamespaceUri](https://vocab.org/vann/#preferredNamespaceUri) - to specify the **namespace IRI** +- [vann:preferredNamespacePrefix](https://vocab.org/vann/#preferredNamespacePrefix) - to specify the **prefix name** +- [vann:preferredNamespaceUri](https://vocab.org/vann/#preferredNamespaceUri) - to specify the **namespace IRI** In the Explore area, an Ontology with a correct namespace declaration looks like this: @@ -51,10 +51,10 @@ In addition to the used vocabulary namespace declarations, you may want to add w Such organization use cases include: -- Namespaces per class / resource type: - - **prefix name** = `persons`, **namespace IRI** = `https://example.org/data/persons/` -- Namespaces per data owner or origin: - - **prefix name** = `sales`, **namespace IRI** = `https://example.org/data/sales/` +- Namespaces per class / resource type: + - **prefix name** = `persons`, **namespace IRI** = `https://example.org/data/persons/` +- Namespaces per data owner or origin: + - **prefix name** = `sales`, **namespace IRI** = `https://example.org/data/sales/` Prefixes in Data Integration are defined on a project basis. When creating a new project, a list of well-know prefixes is already declared. 
@@ -68,8 +68,8 @@ By using the **Edit Prefix Settings** button in this Configuration area, you wil In this dialog, you are able to -- Delete a namespace declaration → **Delete Prefix** -- Add a new namespace declaration → **Add** +- Delete a namespace declaration → **Delete Prefix** +- Add a new namespace declaration → **Add** ## Validating Namespace Declarations diff --git a/docs/build/evaluate-template/index.md b/docs/build/evaluate-template/index.md index 3c8ed195e..7042fac35 100644 --- a/docs/build/evaluate-template/index.md +++ b/docs/build/evaluate-template/index.md @@ -30,7 +30,7 @@ The graph dataset is attached to the email as an N-triples file. The following material is used in this tutorial: -- RDF graph containing company information regarding employees, products and services: [company.ttl](company.ttl) +- RDF graph containing company information regarding employees, products and services: [company.ttl](company.ttl) ```Turtle a prod:Hardware ; @@ -294,11 +294,11 @@ The tutorial consists of the following steps, which are described in detail belo 3. Fill in the required details, such as **Label**, your email credentials for sending, and the recipient email address(es). When finished, click **Create**. - - Host: The SMTP host, e.g, mail.myProvider.com - - Port: The SMTP port - - User: The username for the email account - - Password: The password for the email account - - To: The recipient email address(es) + - Host: The SMTP host, e.g, mail.myProvider.com + - Port: The SMTP port + - User: The username for the email account + - Password: The password for the email account + - To: The recipient email address(es)
@@ -320,17 +320,17 @@ The tutorial consists of the following steps, which are described in detail belo Items can be dragged from the list of items on the left side onto the canvas. To connect the outputs and inputs, click and hold the output on the right side of an item and drag it to the input on the left side of another item. - - The **Knowledge Graph dataset** connects to the **Request RDF triples task** and the **SPARQL Select query task**. - - The **Request RDF triples task** connects to the **RDF dataset**. + - The **Knowledge Graph dataset** connects to the **Request RDF triples task** and the **SPARQL Select query task**. + - The **Request RDF triples task** connects to the **RDF dataset**. It requests all triples from the products graph and sends them to the dataset. - - The **RDF dataset** connects to the **Send eMail task**. + - The **RDF dataset** connects to the **Send eMail task**. It holds the NTriples file that will be attached to the email. - - The **SPARQL Select query task** connects to the **Evaluate template task**. + - The **SPARQL Select query task** connects to the **Evaluate template task**. Note that the graph to be queried is specified in the SPARQL query itself with the FROM clause, while the input only triggers its execution. The query results are sent to its output. - - The **Evaluate template task** connects to the **Text dataset**. + - The **Evaluate template task** connects to the **Text dataset**. It receives the SPARQL query results and sends the evaluated Jinja template to its output. - - The **Text dataset** connects to the **Transform**. + - The **Text dataset** connects to the **Transform**. It holds the text file with the evaluated Jinja template and acts as input for the Transform. ![Workflow 1](workflow-1.png){ class="bordered" } @@ -340,11 +340,11 @@ The tutorial consists of the following steps, which are described in detail belo The **Evaluate template** operator can also be connected directly to the **Transform**. 
In this case, skip [§6](#6-create-a-text-dataset) and enter *output* instead of *text* for the **Value path** of the value mapping in the **Transform** (see [§7.6](#7-create-a-transform)). -1. Click on three dots of the **Send eMail** task, select **Config** and tick the check box to enable the config port. +5. Click on three dots of the **Send eMail** task, select **Config** and tick the check box to enable the config port. ![Set Config Port](config-port.png){ class="bordered" width="55%" } -2. Connect the output of the **Transform** to the config port located on the top of the **Send eMail** task. +6. Connect the output of the **Transform** to the config port located on the top of the **Send eMail** task. When finished, click **Save**. The complete workflow now looks as shown below. diff --git a/docs/build/extracting-data-from-a-web-api/index.md b/docs/build/extracting-data-from-a-web-api/index.md index f593781f4..2b2939a32 100644 --- a/docs/build/extracting-data-from-a-web-api/index.md +++ b/docs/build/extracting-data-from-a-web-api/index.md @@ -24,7 +24,7 @@ The tutorial is based on the [GitHub API (v3)](https://developer.github.com/v3/) In order to get familiar with the API, simply fetch an example response with this command: ``` shell-session -$ curl https://api.github.com/orgs/vocol/repos +curl https://api.github.com/orgs/vocol/repos ``` The HTTP Get request retrieves all repositories of a GitHub organization named vocol. @@ -146,4 +146,3 @@ To build a workflow that combines all the elements we previously built, we now d 5. Validate the result by clicking on the **Workflow** **Report** tab and see the result of your execution. In this example, 15x repositories were found from the GitHub API request. 
![Workflow execution report](extract-from-api-wf-report.png) - diff --git a/docs/build/index.md b/docs/build/index.md index 96ecb187e..b180075bd 100644 --- a/docs/build/index.md +++ b/docs/build/index.md @@ -15,7 +15,7 @@ The Build stage is used to turn your legacy data points from existing datasets i
-- :eccenca-application-dataintegration: Introduction and Best Practices +- :eccenca-application-dataintegration: Introduction and Best Practices --- @@ -24,15 +24,14 @@ The Build stage is used to turn your legacy data points from existing datasets i - [Cool IRIs](cool-iris/index.md) --- URIs and IRIs are character strings identifying the nodes and edges in the graph. Defining them is an important step in creating an exploitable Knowledge Graph for your Company. - [Define Prefixes / Namespaces](define-prefixes-namespaces/index.md) --- Define Prefixes / Namespaces — Namespace declarations allow for abbreviation of IRIs by using a prefixed name instead of an IRI, in particular when writing SPARQL queries or Turtle. -- :material-list-status: Tutorials +- :material-list-status: Tutorials --- - - [Lift Data from Tabular Data](lift-data-from-tabular-data-such-as-csv-xslx-or-database-tables/index.md) --- Build a Knowledge Graph from from Tabular Data such as CSV, XSLX or Database Tables. - - [Lift data from JSON and XML sources](lift-data-from-json-and-xml-sources/index.md) --- Build a Knowledge Graph based on input data from hierarchical sources such as JSON and XML files. - - [Extracting data from a Web API](extracting-data-from-a-web-api/index.md) --- Build a Knowledge Graph based on input data from a Web API. - - [Reconfigure Workflow Tasks](workflow-reconfiguration/index.md) --- During its execution, new parameters can be loaded from any source, which overwrites originally set parameters. - - [Incremental Database Loading](loading-jdbc-datasets-incrementally/index.md) --- Load data incrementally from a JDBC Dataset (relational database Table) into a Knowledge Graph. + - [Lift Data from Tabular Data](lift-data-from-tabular-data-such-as-csv-xslx-or-database-tables/index.md) --- Build a Knowledge Graph from from Tabular Data such as CSV, XSLX or Database Tables. 
+ - [Lift data from JSON and XML sources](lift-data-from-json-and-xml-sources/index.md) --- Build a Knowledge Graph based on input data from hierarchical sources such as JSON and XML files. + - [Extracting data from a Web API](extracting-data-from-a-web-api/index.md) --- Build a Knowledge Graph based on input data from a Web API. + - [Reconfigure Workflow Tasks](workflow-reconfiguration/index.md) --- During its execution, new parameters can be loaded from any source, which overwrites originally set parameters. + - [Incremental Database Loading](loading-jdbc-datasets-incrementally/index.md) --- Load data incrementally from a JDBC Dataset (relational database Table) into a Knowledge Graph.
- diff --git a/docs/build/integrations/index.md b/docs/build/integrations/index.md index 7ad316132..8a1005a68 100644 --- a/docs/build/integrations/index.md +++ b/docs/build/integrations/index.md @@ -14,7 +14,7 @@ The following services and applications can be easily integrated in Corporate Me
-- :simple-anthropic:{ .lg .middle } Anthropic / Claude +- :simple-anthropic:{ .lg .middle } Anthropic / Claude --- @@ -23,52 +23,45 @@ to interact with any [Anthropic / Claude provided Large Language Models](https://docs.claude.com/en/docs/about-claude/models/overview) (LLMs). - -- :other-apacheavro:{ .lg .middle } Avro +- :other-apacheavro:{ .lg .middle } Avro --- Use the [Avro](../../build/reference/dataset/avro.md) dataset to read and write files in the [Avro format](https://avro.apache.org/). - -- :material-microsoft-azure:{ .lg .middle } Azure AI Foundry +- :material-microsoft-azure:{ .lg .middle } Azure AI Foundry --- Use the [Execute Instructions](../../build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md) or [Create Embeddings](../../build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md) task to interact with any [Azure AI Foundry provided Large Language Models](https://ai.azure.com/catalog) (LLMs). - -- :fontawesome-solid-file-csv:{ .lg .middle } CSV +- :fontawesome-solid-file-csv:{ .lg .middle } CSV --- Comma-separated values (CSV) is a text data format which can be processed (read and write) with the [CSV Dataset](../../build/reference/dataset/csv.md). - -- :material-email-outline:{ .lg .middle } eMail / SMTP +- :material-email-outline:{ .lg .middle } eMail / SMTP --- Send plain text or HTML formatted [eMail messages](../../build/reference/customtask/SendEMail.md) using an SMTP server. - -- :material-file-excel:{ .lg .middle } Excel +- :material-file-excel:{ .lg .middle } Excel --- Use the [Excel](../../build/reference/dataset/excel.md) task to read and write to Excel workbooks in the Open XML format (XLSX). - -- :material-google-drive:{ .lg .middle } Google Drive +- :material-google-drive:{ .lg .middle } Google Drive --- Use the [Excel (Google Drive)](../../build/reference/dataset/googlespreadsheet.md) to read and write to Excel workbooks in Google Drive. 
- -- :other-graphdb:{ .lg .middle } GraphDB +- :other-graphdb:{ .lg .middle } GraphDB --- @@ -79,97 +72,84 @@ Query data from GraphDB by using the SPARQL [Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. GraphDB can be used as the integrated Quad Store as well. - -- :simple-graphql:{ .lg .middle } GraphQL +- :simple-graphql:{ .lg .middle } GraphQL --- You can execute a [GraphQL query](../../build/reference/customtask/cmem_plugin_graphql-workflow-graphql-GraphQLPlugin.md) and process the result in a workflow. - -- :simple-apachehive:{ .lg .middle } Hive +- :simple-apachehive:{ .lg .middle } Hive --- Read from or write to an embedded Apache [Hive database](../../build/reference/dataset/Hive.md) endpoint. - -- :simple-jira:{ .lg .middle } Jira +- :simple-jira:{ .lg .middle } Jira --- Execute a [JQL query](../../build/reference/customtask/cmem_plugin_jira-JqlQuery.md) on a Jira instance to fetch and integrate issue data. - -- :material-code-json:{ .lg .middle } JSON +- :material-code-json:{ .lg .middle } JSON --- Use the [JSON](../../build/reference/dataset/json.md) dataset to read and write JSON files (JavaScript Object Notation). - -- :material-code-json:{ .lg .middle } JSON Lines +- :material-code-json:{ .lg .middle } JSON Lines --- Use the [JSON](../../build/reference/dataset/json.md) dataset to read and write files in the [JSON Lines](https://jsonlines.org/) text file format. - -- :simple-apachekafka:{ .lg .middle } Kafka +- :simple-apachekafka:{ .lg .middle } Kafka --- You can [send](../../build/reference/customtask/cmem_plugin_kafka-SendMessages.md) and [receive messages](../../build/reference/customtask/cmem_plugin_kafka-ReceiveMessages.md) to and from a Kafka topic. - -- :simple-kubernetes:{ .lg .middle } Kubernetes +- :simple-kubernetes:{ .lg .middle } Kubernetes --- You can [Execute a command in a kubernetes pod](../../build/reference/customtask/cmem_plugin_kubernetes-Execute.md) and captures its output to process it. 
- -- :simple-mariadb:{ .lg .middle } MariaDB +- :simple-mariadb:{ .lg .middle } MariaDB --- MariaDB can be accessed with the [Remote SQL endpoint](../../build/reference/dataset/Jdbc.md) dataset and a [JDBC driver](https://central.sonatype.com/artifact/org.mariadb.jdbc/mariadb-java-client/overview). - -- :simple-mattermost:{ .lg .middle } Mattermost +- :simple-mattermost:{ .lg .middle } Mattermost --- Send workflow reports or any other message to user and groups in you Mattermost with the [Send Mattermost messages](../../build/reference/customtask/cmem_plugin_mattermost.md) task. - -- :material-microsoft:{ .lg .middle } Microsoft SQL +- :material-microsoft:{ .lg .middle } Microsoft SQL --- The Microsoft SQL Server can be accessed with the [Remote SQL endpoint](../../build/reference/dataset/Jdbc.md) dataset and a [JDBC driver](https://central.sonatype.com/artifact/com.microsoft.sqlserver/mssql-jdbc). - -- :simple-mysql:{ .lg .middle } MySQL +- :simple-mysql:{ .lg .middle } MySQL --- MySQL can be accessed with the [Remote SQL endpoint](../../build/reference/dataset/Jdbc.md) dataset and a [JDBC driver](https://central.sonatype.com/artifact/org.mariadb.jdbc/mariadb-java-client/overview). - -- :simple-neo4j:{ .lg .middle } Neo4J +- :simple-neo4j:{ .lg .middle } Neo4J --- Use the [Neo4j](../../build/reference/dataset/neo4j.md) dataset for reading and writing [Neo4j graphs](https://neo4j.com/). - -- :other-neptune:{ .lg .middle } Neptune +- :other-neptune:{ .lg .middle } Neptune --- @@ -180,85 +160,74 @@ Query data from Amazon Neptune by using the SPARQL [Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. Amazon Neptune can be used as the integrated Quad Store as well (beta). 
- -- :simple-nextcloud:{ .lg .middle } Nextcloud +- :simple-nextcloud:{ .lg .middle } Nextcloud --- Use a Nextcloud instance to [download files](../../build/reference/customtask/cmem_plugin_nextcloud-Download.md) to process them or [upload files](../../build/reference/customtask/cmem_plugin_nextcloud-Upload.md) you created with Corporate Memory. - -- :material-microsoft-office:{ .lg .middle } Office 365 +- :material-microsoft-office:{ .lg .middle } Office 365 --- Use the [Excel (OneDrive, Office365)](../../build/reference/dataset/office365preadsheet.md) to read and write to Excel workbooks in Office 365. - -- :simple-ollama:{ .lg .middle } Ollama +- :simple-ollama:{ .lg .middle } Ollama --- Use the [Execute Instructions](../../build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md) or [Create Embeddings](../../build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md) task to interact with Ollama provided Large Language Models (LLMs). - -- :simple-openai:{ .lg .middle } OpenAI +- :simple-openai:{ .lg .middle } OpenAI --- Use the [Execute Instructions](../../build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md) or [Create Embeddings](../../build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md) task to interact with any [OpenAI provided Large Language Models](https://platform.openai.com/docs/models) (LLMs). - -- :octicons-ai-model-24:{ .lg .middle } OpenRouter +- :octicons-ai-model-24:{ .lg .middle } OpenRouter --- Use the [Execute Instructions](../../build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md) or [Create Embeddings](../../build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md) task to interact with any [OpenRouter provided Large Language Models](https://openrouter.ai/models) (LLMs). - -- :other-apacheorc:{ .lg .middle } ORC +- :other-apacheorc:{ .lg .middle } ORC --- Use the [ORC](../../build/reference/dataset/orc.md) dataset to read and write files in the [ORC](https://orc.apache.org/) format. 
- -- :simple-apacheparquet:{ .lg .middle } Parquet +- :simple-apacheparquet:{ .lg .middle } Parquet --- Use the [Parquet](../../build/reference/dataset/parquet.md) dataset to read and write files in the [Parquet](https://parquet.apache.org/) format. - -- :black_large_square:{ .lg .middle } pgvector +- :black_large_square:{ .lg .middle } pgvector --- Store vector embeddings into [pgvector](https://github.com/pgvector/pgvector) using the [Search Vector Embeddings](../../build/reference/customtask/cmem_plugin_pgvector-Search.md). - -- :simple-postgresql:{ .lg .middle } PostgreSQL +- :simple-postgresql:{ .lg .middle } PostgreSQL --- PostgreSQL can be accessed with the [Remote SQL endpoint](../../build/reference/dataset/Jdbc.md) dataset and a [JDBC driver](https://central.sonatype.com/artifact/org.postgresql/postgresql/versions). - -- :other-powerbi:{ .lg .middle } PowerBI +- :other-powerbi:{ .lg .middle } PowerBI --- Leverage your Knowledge Graphs in PowerBI by using our [Corporate Memory Power-BI-Connector](../../consume/consuming-graphs-in-power-bi/index.md). - -- :other-qlever:{ .lg .middle } Qlever +- :other-qlever:{ .lg .middle } Qlever --- @@ -269,8 +238,7 @@ Query data from Qlever by using the SPARQL [Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. Qlever can be used as the integrated Quad Store as well (beta). - -- :simple-semanticweb:{ .lg .middle } RDF +- :simple-semanticweb:{ .lg .middle } RDF --- @@ -279,61 +247,53 @@ Qlever can be used as the integrated Quad Store as well (beta). [Turtle](https://www.w3.org/TR/turtle/), [RDF/XML](https://www.w3.org/TR/rdf-syntax-grammar/) or [RDF/JSON](https://www.w3.org/TR/rdf-json/)). - -- :other-redash:{ .lg .middle } Redash +- :other-redash:{ .lg .middle } Redash --- Leverage your Knowledge Graphs in Redash using the integrated [Corporate Memory Redash-Connector](../../consume/consuming-graphs-with-redash/index.md). 
- -- :material-application-braces-outline:{ .lg .middle } REST +- :material-application-braces-outline:{ .lg .middle } REST --- Execute REST requests using [Execute REST requests](../../build/reference/customtask/eccencaRestOperator.md). - -- :fontawesome-brands-salesforce:{ .lg .middle } Salesforce +- :fontawesome-brands-salesforce:{ .lg .middle } Salesforce --- Interact with your Salesforce data, such as [Create/Update Salesforce Objects](../../build/reference/customtask/cmem_plugin_salesforce-workflow-operations-SobjectCreate.md) or execute a [SOQL query (Salesforce)](../../build/reference/customtask/cmem_plugin_salesforce-SoqlQuery.md). - -- :simple-snowflake:{ .lg .middle } Snowflake +- :simple-snowflake:{ .lg .middle } Snowflake --- Snowflake can be accessed with the [Snowflake SQL endpoint](../../build/reference/dataset/SnowflakeJdbc.md) dataset and a [JDBC driver](https://central.sonatype.com/artifact/net.snowflake/snowflake-jdbc). - -- :simple-apachespark:{ .lg .middle } Spark +- :simple-apachespark:{ .lg .middle } Spark --- Apply a [Spark](https://spark.apache.org/) function to a specified field using [Execute Spark function](../../build/reference/customtask/SparkFunction.md). - -- :simple-sqlite:{ .lg .middle } SQLite +- :simple-sqlite:{ .lg .middle } SQLite --- SQLite can be accessed with the [Remote SQL endpoint](../../build/reference/dataset/Jdbc.md) dataset and a [JDBC driver](https://central.sonatype.com/artifact/org.xerial/sqlite-jdbc). - -- :material-ssh:{ .lg .middle } SSH +- :material-ssh:{ .lg .middle } SSH --- Interact with SSH servers to [Download SSH files](../../build/reference/customtask/cmem_plugin_ssh-Download.md) or [Execute commands via SSH](../../build/reference/customtask/cmem_plugin_ssh-Execute.md). - -- :other-tentris:{ .lg .middle } Tentris +- :other-tentris:{ .lg .middle } Tentris --- @@ -344,16 +304,14 @@ Query data from Tentris by using the SPARQL [Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. 
Tentris can be used as the integrated Quad Store as well (beta). - -- :simple-trino:{ .lg .middle } Trino +- :simple-trino:{ .lg .middle } Trino --- [Trino](https://github.com/trinodb/trino) can be access with the [Remote SQL endpoint](../../build/reference/dataset/Jdbc.md) dataset and a [JDBC driver](https://trino.io/docs/current/client/jdbc.html). - -- :black_large_square:{ .lg .middle } Virtuoso +- :black_large_square:{ .lg .middle } Virtuoso --- @@ -364,29 +322,23 @@ Query data from Virtuoso by using the SPARQL [Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. Virtuoso can be used as the integrated Quad Store as well (beta). - -- :material-xml:{ .lg .middle } XML +- :material-xml:{ .lg .middle } XML --- Load and write data to XML files with the [XML](../../build/reference/dataset/xml.md) dataset as well as [Parse XML](../../build/reference/customtask/XmlParserOperator.md) from external services. - -- :simple-yaml:{ .lg .middle } YAML +- :simple-yaml:{ .lg .middle } YAML --- Load and integrate data from YAML files with the [Parse YAML](../../build/reference/customtask/cmem_plugin_yaml-parse.md) task. - -- :material-code-json:{ .lg .middle } Zipped JSON +- :material-code-json:{ .lg .middle } Zipped JSON --- Use the [JSON](../../build/reference/dataset/json.md) dataset to read and write JSON files in a ZIP Archive. - - - -
\ No newline at end of file + diff --git a/docs/build/kafka-consumer/index.md b/docs/build/kafka-consumer/index.md index 3d2f7fb98..907a6abe5 100644 --- a/docs/build/kafka-consumer/index.md +++ b/docs/build/kafka-consumer/index.md @@ -53,18 +53,18 @@ In Create new item window, select Kafka Consumer (Receive Messages) and click Ad Configure the Kafka Consumer according to the topic that shall be consumed: -- **Bootstrap Server** - URL of the Kafka broker including the port number (commonly port ´9092) -- **Security Protocol** - Security mechanism used for authentication -- **Topic** - Name / ID of the topic where messages are published -- **Advanced Section** - - **Messages Dataset** - A dataset (XML/JSON) where messages can be written to. Leave this field empty to output the messages as entities (see below). - - **SASL** authentication settings as provided by your Kafka broker - - **Auto Offset Reset** - Consumption starts either at the earliest offset or the latest offset. - - **Consumer Group Name** - Consumer groups can be used to distribute the load of messages (partitions) between multiple consumers of the same group (c.f. [Kafka Concepts](https://docs.confluent.io/platform/current/clients/consumer.html#concepts)). - - **Client Id** - An optional identifier of the client which is communicated to the server. When this field is empty, the plugin defaults to `DNS:PROJECT_ID:TASK_ID`. - - **Local Consumer Queue Size** - Maximum total message size in kilobytes that the consumer can buffer for a specific partition. The consumer will stop fetching from the partition if it hits this limit. This helps prevent consumers from running out of memory. - - **Message Limit** - The maximum number of messages to fetch and process in each run. If `0` or less, all messages will be fetched. - - **Disable Commit** Setting this to `true` will disable committing messages after retrival. 
This means you will receive the same messages on the next execution (for testing, development, or debugging). +- **Bootstrap Server** - URL of the Kafka broker including the port number (commonly port ´9092) +- **Security Protocol** - Security mechanism used for authentication +- **Topic** - Name / ID of the topic where messages are published +- **Advanced Section** + - **Messages Dataset** - A dataset (XML/JSON) where messages can be written to. Leave this field empty to output the messages as entities (see below). + - **SASL** authentication settings as provided by your Kafka broker + - **Auto Offset Reset** - Consumption starts either at the earliest offset or the latest offset. + - **Consumer Group Name** - Consumer groups can be used to distribute the load of messages (partitions) between multiple consumers of the same group (c.f. [Kafka Concepts](https://docs.confluent.io/platform/current/clients/consumer.html#concepts)). + - **Client Id** - An optional identifier of the client which is communicated to the server. When this field is empty, the plugin defaults to `DNS:PROJECT_ID:TASK_ID`. + - **Local Consumer Queue Size** - Maximum total message size in kilobytes that the consumer can buffer for a specific partition. The consumer will stop fetching from the partition if it hits this limit. This helps prevent consumers from running out of memory. + - **Message Limit** - The maximum number of messages to fetch and process in each run. If `0` or less, all messages will be fetched. + - **Disable Commit** Setting this to `true` will disable committing messages after retrival. This means you will receive the same messages on the next execution (for testing, development, or debugging). ![Configuration options](configure-kafka-consumer.png){ class="bordered" } @@ -86,11 +86,11 @@ To execute the Kafka Consumer it needs to be placed inside a Workflow. 
The messa In the "message streaming mode" (**Messages Dataset** is not set) the received messages will be generated as entities and forwarded to the subsequent operator in the workflow. This mode is not limited to any message format. The generated message entities will have the following flat schema: -- **key** — the optional key of the message, -- **content** — the message itself as plain text, -- **offset** — the given offset of the message in the topic, -- **ts-production** — the timestamp when the message was written to the topic, -- **ts-consumption** — the timestamp when the message was consumed from the topic. +- **key** — the optional key of the message, +- **content** — the message itself as plain text, +- **offset** — the given offset of the message in the topic, +- **ts-production** — the timestamp when the message was written to the topic, +- **ts-consumption** — the timestamp when the message was consumed from the topic. Connect the output of Kafka Consumer inside a Workflow to a tabular dataset (e.g. a [CSV Dataset](../reference/dataset/csv.md)) or directly to a transformation task. 
diff --git a/docs/build/lift-data-from-json-and-xml-sources/index.md b/docs/build/lift-data-from-json-and-xml-sources/index.md index eed445373..ea2d30af8 100644 --- a/docs/build/lift-data-from-json-and-xml-sources/index.md +++ b/docs/build/lift-data-from-json-and-xml-sources/index.md @@ -30,11 +30,11 @@ The documentation consists of the following steps, which are described in detail The following material is used in this tutorial: -- Sample vocabulary describing the data in the JSON and XML files: [products_vocabulary.nt](products_vocabulary.nt) +- Sample vocabulary describing the data in the JSON and XML files: [products_vocabulary.nt](products_vocabulary.nt) ![Visualization of the "Products Vocabulary".](products-vocab-xml+json.png){ class="bordered" } -- Sample JSON file: [services.json](services.json) +- Sample JSON file: [services.json](services.json) ```json [ @@ -56,7 +56,7 @@ The following material is used in this tutorial: ] ``` -- Sample XML file: [orgmap.xml](orgmap.xml) +- Sample XML file: [orgmap.xml](orgmap.xml) ```xml @@ -119,9 +119,9 @@ The vocabulary contains the classes and properties needed to map the source data 3. Define a **Name**, a **Graph URI** and a **Description** of the vocabulary. _In this example we will use:_ - - Name: _**Product Vocabulary**_ - - Graph URI: _****_ - - Description: _**Example vocabulary modeled to describe relations between products and services.**_ + - Name: _**Product Vocabulary**_ + - Graph URI: _****_ + - Description: _**Example vocabulary modeled to describe relations between products and services.**_ ![Dialog to register a new vocabulary.](dialog-register-new-vocabulary.png){ class="bordered" width="50%" } @@ -339,8 +339,8 @@ Click **Transform evaluation** to evaluate the transformed entities. 2. Press the ![Button play](button-play-xml+json.png) button and validate the results. In this example, 9x Service entities were created in our Knowledge Graph based on the mapping. 3. 
You can click **Knowledge Graphs** under **EXPLORE** to (re-)view of the created Knowledge Graphs 4. Enter the following URIs in the Enter search term for JSON and XML respectively. - - JSON / Service: _****_ - - XML / Department: _****_ + - JSON / Service: _****_ + - XML / Department: _****_ === "JSON" diff --git a/docs/build/lift-data-from-tabular-data-such-as-csv-xslx-or-database-tables/index.md b/docs/build/lift-data-from-tabular-data-such-as-csv-xslx-or-database-tables/index.md index e6788d6ba..fe765c3b8 100644 --- a/docs/build/lift-data-from-tabular-data-such-as-csv-xslx-or-database-tables/index.md +++ b/docs/build/lift-data-from-tabular-data-such-as-csv-xslx-or-database-tables/index.md @@ -33,16 +33,15 @@ The documentation consists of the following steps, which are described in detail 5. Evaluate a Transformation 6. Build the Knowledge Graph - ## Sample Material The following material is used in this tutorial, you should download the files and have them at hand throughout the tutorial: -- Sample vocabulary which describes the data in the CSV files: [products_vocabulary.nt](products_vocabulary.nt) +- Sample vocabulary which describes the data in the CSV files: [products_vocabulary.nt](products_vocabulary.nt) ![](products-vocab.png){ class="bordered" } -- Sample CSV file: [services.csv](services.csv) +- Sample CSV file: [services.csv](services.csv) !!! info @@ -52,7 +51,7 @@ The following material is used in this tutorial, you should download the files a | I241-8776317 | Component Confabulation | Z249-1364492, L557-1467804, C721-7900144, ... | Corinna.Ludwig@company.org | 1082,00 EUR | | … | … | … | … | … | -- Sample Excel file: [products.xlsx](products.xlsx) +- Sample Excel file: [products.xlsx](products.xlsx) !!! 
info @@ -87,7 +86,6 @@ The vocabulary contains the classes and properties needed to map the data into t ![Register new Vocabulary](register-new-vocab.png){ class="bordered" width="50%" } - === "cmemc" ``` shell-session @@ -102,7 +100,7 @@ The vocabulary contains the classes and properties needed to map the data into t ![](menu-build-projects.png){ class="bordered" width="50%" } -2. Click **Create :octicons-plus-circle-24:** at the top right of the page.  +2. Click **Create :octicons-plus-circle-24:** at the top right of the page. 3. In the **Create new item** window, select **Project** and click **Add**. The Create new item of type Project window appears.   @@ -110,7 +108,6 @@ The vocabulary contains the classes and properties needed to map the data into t 5. Click **Create**. Your project is created. - --- === "Workflow view" @@ -198,7 +195,6 @@ The transformation defines how an input dataset (e.g. CSV) will be transformed i ![](transformation-label.png){ class="bordered" width="50%" } - 3. Scroll down to **Target vocabularies** and choose **Products vocabulary**. ![](select-vocabulary.png){ class="bordered" width="50%" } @@ -219,13 +215,13 @@ The transformation defines how an input dataset (e.g. CSV) will be transformed i 4. Define the **Target entity type** from the vocabulary, the **URI pattern** and a **label** for the mapping. _In this example we will use:_ - - Target entity type: _**Service**_ - - URI pattern: + - Target entity type: _**Service**_ + - URI pattern: - - Click **Create custom pattern** - - Insert `http://ld.company.org/prod-inst/{ServiceID}`, where `http://ld.company.org/prod-inst/` is a common prefix for the instances in this use case, and `{ServiceID}` is a placeholder that will resolve to the column of that name. 
+ - Click **Create custom pattern** + - Insert `http://ld.company.org/prod-inst/{ServiceID}`, where `http://ld.company.org/prod-inst/` is a common prefix for the instances in this use case, and `{ServiceID}` is a placeholder that will resolve to the column of that name. - - An optional Label: `Service` + - An optional Label: `Service` ![](services-mapping-class.png){ class="bordered" width="50%" } @@ -237,26 +233,26 @@ _Example RDF triple in our Knowledge Graph based on the mapping definition:_ ``` -6. Evaluate your mapping by clicking the Expand :material-greater-than: button in the **Examples of target data** property to see at most three generated base URIs. +1. Evaluate your mapping by clicking the Expand :material-greater-than: button in the **Examples of target data** property to see at most three generated base URIs. ![](mapping-inline-preview.png){ class="bordered" width="50%" } We have now created the Service entities in the Knowledge Graph. As a next step, we will add the name of the Service entity. -7. Press the circular **Blue + button** on the lower right and select **Add value mapping**. +2. Press the circular **Blue + button** on the lower right and select **Add value mapping**. ![](services-mapping-add-rule.png){ class="bordered" width="50%" } -8. Define the **Target property**, the **Data type**, the **Value path** (column name) and a **Label** for your value mapping. _In this example we will use:_ +3. Define the **Target property**, the **Data type**, the **Value path** (column name) and a **Label** for your value mapping. 
_In this example we will use:_ - - Target Property: `name` - - Data type: _**String**_ - - Value path: `ServiceName` (which corresponds to the column of that name) - - An optional Label: `service name` + - Target Property: `name` + - Data type: _**String**_ + - Value path: `ServiceName` (which corresponds to the column of that name) + - An optional Label: `service name` ![](services-mapping-rule-edit.png){ class="bordered" width="50%" } -9. Click **Save**. +4. Click **Save**. --- @@ -266,7 +262,6 @@ Go the **Transform evaluation** tab of your transformation to view a list of gen ![](mapping-evaluation.png){ class="bordered" width="50%" } - --- ## 6 Build the Knowledge Graph @@ -279,8 +274,8 @@ Go the **Transform evaluation** tab of your transformation to view a list of gen 3. Define a **Label** for the Knowledge Graph and provide a **graph** uri. Leave all the other parameters at the default values. _In this example we will use:_ - - Label: `Service Knowledge Graph` - - Graph: `http://ld.company.org/prod-instances/` + - Label: `Service Knowledge Graph` + - Graph: `http://ld.company.org/prod-instances/` ![](knowledge-graph.png){ class="bordered" width="50%" } @@ -292,7 +287,6 @@ Go the **Transform evaluation** tab of your transformation to view a list of gen ![](mapping-execution-result.png){ class="bordered" width="50%" } - 7. Click Knowledge Graph under **Explore** in the navigation on the left side of the page. ![](explore-knowledge-graph.png){ class="bordered" width="50%" } @@ -308,4 +302,3 @@ Go the **Transform evaluation** tab of your transformation to view a list of gen 10. 
Finally you can use the Explore **Knowledge Graphs** module to (re-)view of the created Knowledge Graph: `http://ld.company.org/prod-instances/` ![](kg-result.png){ class="bordered" width="50%" } - diff --git a/docs/build/loading-jdbc-datasets-incrementally/index.md b/docs/build/loading-jdbc-datasets-incrementally/index.md index fd35cc182..7e6e2c9d1 100644 --- a/docs/build/loading-jdbc-datasets-incrementally/index.md +++ b/docs/build/loading-jdbc-datasets-incrementally/index.md @@ -45,15 +45,15 @@ To extract data from a relational database, you need to first register a **JDBC ![Create JDBC Dataset](create-dataset-JDBC.png){ class="bordered" } 5. Provide the required configuration details for the JDBC endpoint: - - **Label**: Provide a table name. - - **Description:** Optionally describe your table. - - **JDBC Driver Connection URL:** Provide the JDBC connection. In this tutorial we use a MySQL database. The database server is named _mysql_ and the database is named _serviceDB_. - - **Table:** Provide the name of the table in the database. - - **Source query**: Provide a default source query. In this tutorial, the source query will be modified later as the OFFSET changes. - - **Limit:** Provide a LIMIT for the SQL query. In this tutorial, we choose 5 for demonstrating the functionality. You may select any value which works for your use case. - - **Query strategy**: Select: _Execute the given source query. No paging or virtual Query._ In this tutorial, this needs to be changed so that when this JDBC endpoint is being used, Corporate Memory will always check for the _Source Query_ that was provided earlier. - - **User**: Provide the user name which is allowed to access the database. - - **Password**: Provide the user password that is allowed to access the database. + - **Label**: Provide a table name. + - **Description:** Optionally describe your table. + - **JDBC Driver Connection URL:** Provide the JDBC connection. In this tutorial we use a MySQL database. 
The database server is named _mysql_ and the database is named _serviceDB_. + - **Table:** Provide the name of the table in the database. + - **Source query**: Provide a default source query. In this tutorial, the source query will be modified later as the OFFSET changes. + - **Limit:** Provide a LIMIT for the SQL query. In this tutorial, we choose 5 for demonstrating the functionality. You may select any value which works for your use case. + - **Query strategy**: Select: _Execute the given source query. No paging or virtual Query._ In this tutorial, this needs to be changed so that when this JDBC endpoint is being used, Corporate Memory will always check for the _Source Query_ that was provided earlier. + - **User**: Provide the user name which is allowed to access the database. + - **Password**: Provide the user password that is allowed to access the database. ![Configuration of a JDBC dataset](configure-JDBC-1.png){ class="bordered" } @@ -71,7 +71,7 @@ To incrementally extract data in Corporate Memory, we need to store the informat 4. Select the previously created JDBC endpoint (in our example: "Services Table (JDBC)" 5. Press the **Turtle** tab inside your JDBC endpoint view (right) -In our example, the JDBC Endpoint IRI looks like this: __IncrementalJDBCdatasetload/8d0e4895-1d45-442f-8fd8-b1459ec3dbde_ServicesTableJDBC_ +In our example, the JDBC Endpoint IRI looks like this: `` See screenshot below for example: @@ -85,7 +85,14 @@ The following three RDF triples hold the (minimal) necessary information we need 2. The second triple defines a label for the Graph. 3. The third triple defines the <...**lastOffset**> property we need for this tutorial. As a default, we set it to 0 to start with the first row in the table. -**services_metadata_graph** +For your project: + +1. adjust the CMEM DI Project IRI and +2. the JDBC endpoint IRI. + +**Import the Graph** in the Exploration tab → Graph (menu) → Add new Graph → Provide Graph IRI + Select file. 
+ +`services_metadata_graph.nt`: ```nt @@ -99,14 +106,7 @@ The following three RDF triples hold the (minimal) necessary information we need "0" . # set the initial offset to zero to start with the first row in the table ``` -For your project, please: - -1. adjust the CMEM DI Project IRI and -2. the JDBC endpoint IRI. - -**Import the Graph** in the Exploration tab → Graph (menu) → Add new Graph → Provide Graph IRI + Select file - -In our example, we used the following Graph IRI for the Metadata Graph: __ +In our example, we used the following Graph IRI for the Metadata Graph: `` ## 3 Create a Transformation to dynamically compose a SQL Query diff --git a/docs/build/mapping-creator/index.md b/docs/build/mapping-creator/index.md index 0f0193246..a15409dcb 100644 --- a/docs/build/mapping-creator/index.md +++ b/docs/build/mapping-creator/index.md @@ -34,9 +34,9 @@ Using visual tools, drag-and-drop, and suggestions, you can create mappings betw The Mapping Creator consists of three parts: -- Source schema shown on the left side -- Target Schema shown on the right side -- Mappings between elements in the source schema and in the target schema +- Source schema shown on the left side +- Target Schema shown on the right side +- Mappings between elements in the source schema and in the target schema You can move, connect or disconnect, and inspect each element visually. 
@@ -65,8 +65,8 @@ To complete a mapping, properties need to be added to complete your desired targ There are two options to add properties: -- during class selection -- from vocabularies +- during class selection +- from vocabularies ##### During class selection @@ -74,9 +74,9 @@ There are two options to add properties: In the _add target class_ dialog you may select different kind of properties: -- class properties - properties defined in the domain of the selected class or its super-classes -- default properties - typical well-known properties like `rdfs:label` or `rdfs:comment` -- generic properties - properties defined with no explicit domain (or in domain of `owl:Thing`) +- class properties - properties defined in the domain of the selected class or its super-classes +- default properties - typical well-known properties like `rdfs:label` or `rdfs:comment` +- generic properties - properties defined with no explicit domain (or in domain of `owl:Thing`) The property preview helps to confirm your choice. @@ -86,8 +86,8 @@ The property preview helps to confirm your choice. The _add property from vocabularies_ dialog allows you to search and select a property and to configure it in the desired way: -- redefine the role of a property, to use a DatatypeProperty in the role of an ObjectProperty, or vice versa -- define the _direction_ an ObjectProperty should be used in +- redefine the role of a property, to use a DatatypeProperty in the role of an ObjectProperty, or vice versa +- define the _direction_ an ObjectProperty should be used in #### Create direct mappings diff --git a/docs/build/reference/aggregator/average.md b/docs/build/reference/aggregator/average.md index 3ca9efb0c..689aa6942 100644 --- a/docs/build/reference/aggregator/average.md +++ b/docs/build/reference/aggregator/average.md @@ -7,8 +7,6 @@ tags: # Average - - Computes the weighted average. ## Examples @@ -21,7 +19,6 @@ Computes the weighted average. 
* Input values: `[0.4, 0.5, 0.9]` * Returns: `0.6` - --- **Multiplies individual similarity scores with their weight before averaging:** @@ -29,20 +26,16 @@ Computes the weighted average. * Input values: `[0.3, 0.5, 0.6]` * Returns: `0.5` - --- **Missing scores always lead to an output of none:** * Input values: `[-1.0, null, 1.0]` * Returns: `null` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/aggregator/firstNonEmpty.md b/docs/build/reference/aggregator/firstNonEmpty.md index 3b6165fcb..347595037 100644 --- a/docs/build/reference/aggregator/firstNonEmpty.md +++ b/docs/build/reference/aggregator/firstNonEmpty.md @@ -7,8 +7,6 @@ tags: # First non-empty score - - Forwards the first input that provides a non-empty similarity score. ## Examples @@ -21,13 +19,10 @@ Forwards the first input that provides a non-empty similarity score. * Input values: `[null, 0.2, 0.5]` * Returns: `0.2` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/aggregator/geometricMean.md b/docs/build/reference/aggregator/geometricMean.md index 11bbed8a3..2fa2d70e4 100644 --- a/docs/build/reference/aggregator/geometricMean.md +++ b/docs/build/reference/aggregator/geometricMean.md @@ -7,8 +7,6 @@ tags: # Geometric mean - - Compute the (weighted) geometric mean. ## Examples @@ -22,7 +20,6 @@ Compute the (weighted) geometric mean. * Input values: `[0.0, 0.0, 0.0]` * Returns: `0.0` - --- **Example 2:** @@ -30,7 +27,6 @@ Compute the (weighted) geometric mean. * Input values: `[1.0, 1.0, 1.0]` * Returns: `1.0` - --- **Example 3:** @@ -38,7 +34,6 @@ Compute the (weighted) geometric mean. * Input values: `[0.5, 1.0]` * Returns: `0.629961` - --- **Example 4:** @@ -46,7 +41,6 @@ Compute the (weighted) geometric mean. * Input values: `[0.5, 1.0, 0.7]` * Returns: `0.672866` - --- **Example 5:** @@ -54,20 +48,16 @@ Compute the (weighted) geometric mean. 
* Input values: `[0.1, 0.9, 0.2]` * Returns: `0.153971` - --- **Missing scores always lead to an output of none:** * Input values: `[-1.0, null, 1.0]` * Returns: `null` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/aggregator/handleMissingValues.md b/docs/build/reference/aggregator/handleMissingValues.md index 28ecf268c..39e97f1ea 100644 --- a/docs/build/reference/aggregator/handleMissingValues.md +++ b/docs/build/reference/aggregator/handleMissingValues.md @@ -7,8 +7,6 @@ tags: # Handle missing values - - Generates a default similarity score, if no similarity score is provided (e.g., due to missing values). Using this operator can have a performance impact, since it lowers the efficiency of the underlying computation. ## Examples @@ -21,7 +19,6 @@ Generates a default similarity score, if no similarity score is provided (e.g., * Input values: `[0.1]` * Returns: `0.1` - --- **Outputs the default score, if no input score is provided:** @@ -31,23 +28,16 @@ Generates a default similarity score, if no similarity score is provided (e.g., * Input values: `[null]` * Returns: `1.0` - - - ## Parameter ### Default value The default value to be generated, if no similarity score is provided. Must be a value between -1 (inclusive) and 1 (inclusive). '1' represents boolean true and '-1' represents boolean false. -- ID: `defaultValue` -- Datatype: `double` -- Default Value: `-1.0` - - - - +* ID: `defaultValue` +* Datatype: `double` +* Default Value: `-1.0` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/aggregator/max.md b/docs/build/reference/aggregator/max.md index 9d3a55ef5..9f57d104b 100644 --- a/docs/build/reference/aggregator/max.md +++ b/docs/build/reference/aggregator/max.md @@ -7,8 +7,6 @@ tags: # Or - - At least one input score must be within the threshold. Selects the maximum score. 
## Examples @@ -21,21 +19,18 @@ At least one input score must be within the threshold. Selects the maximum score * Input values: `[0.5, 0.0]` * Returns: `0.5` - --- **Selects the maximum similarity score:** * Input values: `[-1.0, -0.5, -0.3]` * Returns: `-0.3` - --- **Missing scores default to a similarity score of -1:** * Input values: `[null]` * Returns: `-1.0` - --- **Weights are ignored:** @@ -43,13 +38,10 @@ At least one input score must be within the threshold. Selects the maximum score * Input values: `[1.0, 0.0]` * Returns: `1.0` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/aggregator/min.md b/docs/build/reference/aggregator/min.md index e323f785b..03a36e0da 100644 --- a/docs/build/reference/aggregator/min.md +++ b/docs/build/reference/aggregator/min.md @@ -7,8 +7,6 @@ tags: # And - - All input scores must be within the threshold. Selects the minimum score. ## Examples @@ -21,21 +19,18 @@ All input scores must be within the threshold. Selects the minimum score. * Input values: `[1.0, 0.0]` * Returns: `0.0` - --- **Selects the minimum similarity score:** * Input values: `[-1.0, 0.0, 0.5, 1.0]` * Returns: `-1.0` - --- **Missing scores default to a similarity score of -1:** * Input values: `[1.0, null, -0.5]` * Returns: `-1.0` - --- **Weights are ignored:** @@ -43,13 +38,10 @@ All input scores must be within the threshold. Selects the minimum score. * Input values: `[1.0, 0.0]` * Returns: `0.0` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/aggregator/negate.md b/docs/build/reference/aggregator/negate.md index f1c4974eb..e333ef88d 100644 --- a/docs/build/reference/aggregator/negate.md +++ b/docs/build/reference/aggregator/negate.md @@ -7,15 +7,12 @@ tags: # Negate - - Negates the result of the input comparison. A single input is expected. 
Using this operator can have a performance impact, since it lowers the efficiency of the underlying computation. - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/aggregator/quadraticMean.md b/docs/build/reference/aggregator/quadraticMean.md index 84e54453e..e8cca0408 100644 --- a/docs/build/reference/aggregator/quadraticMean.md +++ b/docs/build/reference/aggregator/quadraticMean.md @@ -7,8 +7,6 @@ tags: # Euclidian distance - - Calculates the Euclidian distance. ## Examples @@ -22,7 +20,6 @@ Calculates the Euclidian distance. * Input values: `[1.0, 1.0, 1.0]` * Returns: `1.0` - --- **Example 2:** @@ -30,7 +27,6 @@ Calculates the Euclidian distance. * Input values: `[1.0, 0.0]` * Returns: `0.707107` - --- **Example 3:** @@ -38,7 +34,6 @@ Calculates the Euclidian distance. * Input values: `[0.4, 0.5, 0.6]` * Returns: `0.506623` - --- **Example 4:** @@ -46,7 +41,6 @@ Calculates the Euclidian distance. * Input values: `[0.0, 0.0]` * Returns: `0.0` - --- **Example 5:** @@ -54,7 +48,6 @@ Calculates the Euclidian distance. * Input values: `[1.0, 0.0, 0.0]` * Returns: `0.707107` - --- **Example 6:** @@ -62,20 +55,16 @@ Calculates the Euclidian distance. * Input values: `[0.4, 0.5, 0.6]` * Returns: `0.538516` - --- **Missing scores always lead to an output of none:** * Input values: `[-1.0, null, 1.0]` * Returns: `null` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/aggregator/scale.md b/docs/build/reference/aggregator/scale.md index 6dbc6efb0..bfc7bc017 100644 --- a/docs/build/reference/aggregator/scale.md +++ b/docs/build/reference/aggregator/scale.md @@ -7,8 +7,6 @@ tags: # Scale - - Scales a similarity score by a factor. ## Examples @@ -24,14 +22,12 @@ Scales a similarity score by a factor. 
* Input values: `[1.0]` * Returns: `0.5` - --- **Ignores missing values:** * Input values: `[null]` * Returns: `null` - --- **Throws a validation error if more than one input is provided:** @@ -39,23 +35,16 @@ Scales a similarity score by a factor. * Returns: `null` * **Throws error:** `IllegalArgumentException` - - - ## Parameter ### Factor All input similarity values are multiplied with this factor. -- ID: `factor` -- Datatype: `double` -- Default Value: `1.0` - - - - +* ID: `factor` +* Datatype: `double` +* Default Value: `1.0` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/CancelWorkflow.md b/docs/build/reference/customtask/CancelWorkflow.md index 79bd3ab60..c42a879c0 100644 --- a/docs/build/reference/customtask/CancelWorkflow.md +++ b/docs/build/reference/customtask/CancelWorkflow.md @@ -8,11 +8,8 @@ tags: # Cancel Workflow - - Cancels a workflow if a specified condition is fulfilled. A typical use case for this operator is to cancel the workflow execution if the input data is empty. - ## Parameter ### Type URI @@ -23,8 +20,6 @@ The entity type to check the condition on. - Datatype: `uri` - Default Value: `None` - - ### Condition The cancellation condition @@ -33,8 +28,6 @@ The cancellation condition - Datatype: `enumeration` - Default Value: `empty` - - ### Invert condition If true, the specified condition will be inverted, i.e., the workflow execution will be cancelled if the condition is not fulfilled. @@ -43,8 +36,6 @@ If true, the specified condition will be inverted, i.e., the workflow execution - Datatype: `boolean` - Default Value: `false` - - ### Fail workflow If true, the workflow execution will fail if the condition is met. If false, the workflow execution would be stopped, but shown as successfull. @@ -53,10 +44,6 @@ If true, the workflow execution will fail if the condition is met. 
If false, the - Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/ConcatenateToFile.md b/docs/build/reference/customtask/ConcatenateToFile.md index e2ea70dbf..cb0234e42 100644 --- a/docs/build/reference/customtask/ConcatenateToFile.md +++ b/docs/build/reference/customtask/ConcatenateToFile.md @@ -8,11 +8,8 @@ tags: # Concatenate to file - - Concatenates values into a file. - ## Parameter ### Path @@ -23,8 +20,6 @@ Values from this path will be concatenated. - Datatype: `string` - Default Value: `None` - - ### Mime type MIME type of the output file. @@ -33,8 +28,6 @@ MIME type of the output file. - Datatype: `string` - Default Value: `None` - - ### Prefix Prefix to be written before the first value. @@ -43,8 +36,6 @@ Prefix to be written before the first value. - Datatype: `multiline string` - Default Value: `None` - - ### Glue Separator to be inserted between concatenated values. @@ -53,8 +44,6 @@ Separator to be inserted between concatenated values. - Datatype: `multiline string` - Default Value: `None` - - ### Suffix Suffix to be written after the last value. @@ -63,10 +52,6 @@ Suffix to be written after the last value. - Datatype: `multiline string` - Default Value: `None` - - - - ## Advanced Parameter ### Charset @@ -77,8 +62,6 @@ The file encoding. - Datatype: `string` - Default Value: `UTF-8` - - ### File extension File extension of the output file. @@ -86,6 +69,3 @@ File extension of the output file. 
- ID: `fileExtension` - Datatype: `string` - Default Value: `.tmp` - - - diff --git a/docs/build/reference/customtask/CustomSQLExecution.md b/docs/build/reference/customtask/CustomSQLExecution.md index 4ba021ce8..db650fd1d 100644 --- a/docs/build/reference/customtask/CustomSQLExecution.md +++ b/docs/build/reference/customtask/CustomSQLExecution.md @@ -8,11 +8,8 @@ tags: # Spark SQL query - - Executes a custom SQL query on the first input Spark dataframe and returns the result as its output. - ## Parameter ### Command @@ -23,10 +20,6 @@ SQL command. The name of the table in the statement must be 'dataset', regardles - Datatype: `code-sql` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/DistinctBy.md b/docs/build/reference/customtask/DistinctBy.md index 695a0b1ad..24a123a56 100644 --- a/docs/build/reference/customtask/DistinctBy.md +++ b/docs/build/reference/customtask/DistinctBy.md @@ -8,11 +8,8 @@ tags: # Distinct by - - Removes duplicated entities based on a user-defined path. Note that this operator does not retain the order of the entities. - ## Parameter ### Distinct path @@ -23,8 +20,6 @@ Entities that share this path will be deduplicated. - Datatype: `string` - Default Value: `None` - - ### Resolve duplicates Strategy to resolve duplicates. @@ -33,10 +28,6 @@ Strategy to resolve duplicates. - Datatype: `enumeration` - Default Value: `keepLast` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/JsonParserOperator.md b/docs/build/reference/customtask/JsonParserOperator.md index 2c13f3fe7..4f0fe9789 100644 --- a/docs/build/reference/customtask/JsonParserOperator.md +++ b/docs/build/reference/customtask/JsonParserOperator.md @@ -8,11 +8,8 @@ tags: # Parse JSON - - Parses an incoming entity as a JSON dataset. Typically, it is used before a transformation task. 
Takes exactly one input of which only the first entity is processed. - ## Parameter ### Input path @@ -23,8 +20,6 @@ The Silk path expression of the input entity that contains the JSON document. If - Datatype: `string` - Default Value: `None` - - ### Base path The path to the elements to be read, starting from the root element, e.g., `/Persons/Person`. If left empty, all direct children of the root element will be read. @@ -33,8 +28,6 @@ The path to the elements to be read, starting from the root element, e.g., `/Per - Datatype: `string` - Default Value: `None` - - ### URI suffix pattern A URI pattern that is relative to the base URI of the input entity, e.g., `/{ID}`, where `{path}` may contain relative paths to elements. This relative part is appended to the input entity URI to construct the full URI pattern. @@ -43,8 +36,6 @@ A URI pattern that is relative to the base URI of the input entity, e.g., `/{ID} - Datatype: `string` - Default Value: `None` - - ### Navigate into arrays Navigate into arrays automatically. If set to false, the `#array` path operator must be used to navigate into arrays. @@ -53,10 +44,6 @@ Navigate into arrays automatically. If set to false, the `#array` path operator - Datatype: `boolean` - Default Value: `true` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/Merge.md b/docs/build/reference/customtask/Merge.md index a2088c746..8ec6a9927 100644 --- a/docs/build/reference/customtask/Merge.md +++ b/docs/build/reference/customtask/Merge.md @@ -8,15 +8,12 @@ tags: # Join tables - - Joins a set of inputs into a single table. Expects a list of entity tables and links. All entity tables are joined into the first entity table using the provided links. 
- ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/MultiTableMerge.md b/docs/build/reference/customtask/MultiTableMerge.md index ec668adf6..36fedc632 100644 --- a/docs/build/reference/customtask/MultiTableMerge.md +++ b/docs/build/reference/customtask/MultiTableMerge.md @@ -8,11 +8,8 @@ tags: # Merge tables - - Stores sets of instance and mapping inputs as relational tables with the mapping as an n:m relation. Expects a list of entity tables and links. All entity tables have a relation to the first entity table using the provided links. - ## Parameter ### Multi table output @@ -23,8 +20,6 @@ test - Datatype: `boolean` - Default Value: `true` - - ### Pivot table name Name of the pivot table. @@ -33,8 +28,6 @@ Name of the pivot table. - Datatype: `string` - Default Value: `None` - - ### Mapping names Name of the mapping tables. Comma separated list. @@ -43,8 +36,6 @@ Name of the mapping tables. Comma separated list. - Datatype: `string` - Default Value: `None` - - ### Instance set names Name of the tables joined to the pivot. Comma separated list. @@ -53,10 +44,6 @@ Name of the tables joined to the pivot. Comma separated list. - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/Pivot.md b/docs/build/reference/customtask/Pivot.md index a64d029f5..8576d5555 100644 --- a/docs/build/reference/customtask/Pivot.md +++ b/docs/build/reference/customtask/Pivot.md @@ -8,8 +8,6 @@ tags: # Pivot - - The pivot operator takes data in separate rows, aggregates it and converts it into columns. The operator works on a flat input schema only and creates a flat output schema. 
@@ -26,7 +24,6 @@ The following aggregation (summary) functions are available: - **sum** - Adds up the values (works with numbers only) - **average** - Finds the average of the values (works with numbers only) - ## Parameter ### Pivot property @@ -37,8 +34,6 @@ The pivot column refers to the column in the input data that is used to organize - Datatype: `string` - Default Value: `None` - - ### First group property The name of the first group column in the range. All columns starting with this will be grouped. @@ -47,8 +42,6 @@ The name of the first group column in the range. All columns starting with this - Datatype: `string` - Default Value: `None` - - ### Last group property The name of the last group column in the range. If left empty, only the first column is grouped. @@ -57,8 +50,6 @@ The name of the last group column in the range. If left empty, only the first co - Datatype: `string` - Default Value: `None` - - ### Value property The property that contains the grouped values that will be aggregated. @@ -67,8 +58,6 @@ The property that contains the grouped values that will be aggregated. - Datatype: `string` - Default Value: `None` - - ### Aggregation function The aggregation function used to aggregate values. @@ -77,8 +66,6 @@ The aggregation function used to aggregate values. - Datatype: `enumeration` - Default Value: `sum` - - ### URI prefix Prefix to prepend to all generated pivot columns. @@ -87,10 +74,6 @@ Prefix to prepend to all generated pivot columns. - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/Scheduler.md b/docs/build/reference/customtask/Scheduler.md index ef7de7666..4ec52f9aa 100644 --- a/docs/build/reference/customtask/Scheduler.md +++ b/docs/build/reference/customtask/Scheduler.md @@ -8,8 +8,6 @@ tags: # Scheduler - - The eccenca Build plugin `Scheduler` executes a given workflow at specified intervals. 
## Description @@ -46,7 +44,6 @@ _next_ period occurs. If the start time lies in the _future_, then this is simpl As mentioned, the `CancelWorkflow` plugin can be used on par in order to _cancel_ the otherwise never-ending execution of a workflow. - ## Parameter ### Workflow @@ -57,8 +54,6 @@ The name of the workflow to be executed - Datatype: `task` - Default Value: `None` - - ### Interval The interval at which the scheduler should run the referenced task. It must be in ISO-8601 duration format PnDTnHnMn.nS. @@ -67,8 +62,6 @@ The interval at which the scheduler should run the referenced task. It must be i - Datatype: `duration` - Default Value: `PT15M` - - ### Start time The time when the scheduled task is run for the first time, e.g., 2017-12-03T10:15:30. If no start time is set, midnight on the day the scheduler is started is assumed. @@ -77,8 +70,6 @@ The time when the scheduled task is run for the first time, e.g., 2017-12-03T10: - Datatype: `string` - Default Value: `None` - - ### Enabled Enables or disables the scheduler. It's enabled by default. @@ -87,8 +78,6 @@ Enables or disables the scheduler. It's enabled by default. - Datatype: `boolean` - Default Value: `true` - - ### Stop on error If set to true, this will stop the scheduler, so the failed task is not scheduled again for execution. @@ -97,10 +86,6 @@ If set to true, this will stop the scheduler, so the failed task is not schedule - Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/SearchAddresses.md b/docs/build/reference/customtask/SearchAddresses.md index eb2f5b7b4..14da6ea34 100644 --- a/docs/build/reference/customtask/SearchAddresses.md +++ b/docs/build/reference/customtask/SearchAddresses.md @@ -8,9 +8,6 @@ tags: # Search addresses - - - **Configuration** The geocoding service to be queried for searches can be set up in the configuration. 
@@ -44,7 +41,6 @@ By default, individual requests to the geocoding service are not logged. To enab com.eccenca.di.geo=DEBUG } - ## Parameter ### Search attributes @@ -55,8 +51,6 @@ List of attributes that contain search terms. Multiple attributes (comma-separat - Datatype: `traversable[string]` - Default Value: `None` - - ### Limit Optionally limits the number of results for each search. @@ -65,10 +59,6 @@ Optionally limits the number of results for each search. - Datatype: `option[int]` - Default Value: `None` - - - - ## Advanced Parameter ### JSON-LD context @@ -79,8 +69,6 @@ Optional JSON-LD context to be used for converting the returned JSON to RDF. If - Datatype: `resource` - Default Value: `None` - - ### Additional parameters Additional URL parameters to be attached to each HTTP search request. Example: '&countrycodes=de&addressdetails=1'. Consult the API documentation for a list of available parameters. @@ -88,6 +76,3 @@ Additional URL parameters to be attached to each HTTP search request. Example: ' - ID: `additionalParameters` - Datatype: `string` - Default Value: `None` - - - diff --git a/docs/build/reference/customtask/SendEMail.md b/docs/build/reference/customtask/SendEMail.md index d4c5f5fd1..d0cfc3e0d 100644 --- a/docs/build/reference/customtask/SendEMail.md +++ b/docs/build/reference/customtask/SendEMail.md @@ -8,8 +8,6 @@ tags: # Send email - - Sends an email using an SMTP server with support for both plain text and HTML formatted messages. 
## Features @@ -57,7 +55,6 @@ Enable HTML formatting and use standard HTML markup in your message: ``` - ## Parameter ### Host @@ -68,8 +65,6 @@ The SMTP host, e.g, mail.myProvider.com - Datatype: `string` - Default Value: `None` - - ### Port The SMTP port @@ -78,8 +73,6 @@ The SMTP port - Datatype: `int` - Default Value: `587` - - ### User Username @@ -88,8 +81,6 @@ Username - Datatype: `string` - Default Value: `None` - - ### Password Password @@ -98,8 +89,6 @@ Password - Datatype: `password` - Default Value: `None` - - ### From The sender email address @@ -108,8 +97,6 @@ The sender email address - Datatype: `string` - Default Value: `None` - - ### To The "To" field is required to have at minimum one email address for the receiver. Multiple email addresses for several receivers are also possible. They need to be separated by commas, as in `info@example.com, john.doe@business.com`. The mailbox may be specified either as a simple address such as `info@example.com`, or in the format 'phrase + route address', as in `"Doe, John" `. Notice the quotes in the phrase `"Doe, John"`, as well as the `<` and `>` surrounding the address. For further information, see the standard for the format of Internet text messages, [RFC 822](https://datatracker.ietf.org/doc/html/rfc822). @@ -118,8 +105,6 @@ The "To" field is required to have at minimum one email address for the receiver - Datatype: `string` - Default Value: `None` - - ### CC The "CC" ('carbon copy') field is intended for the secondary recipients of the email. Otherwise, the same comments as in the "To" field, regarding receivers and formatting, are valid here. @@ -128,8 +113,6 @@ The "CC" ('carbon copy') field is intended for the secondary recipients of the e - Datatype: `string` - Default Value: `None` - - ### BCC The "BCC" ('blind carbon copy') field is reserved for the anonymous recipients of the email. The recipients contained in this field will not be included in the messages sent to the primary (i.e. 
"To") and secondary (i.e. "Cc" and the other "Bcc") recipients. Otherwise, the same comments as in the "To" field, regarding receivers and formatting, are valid here. @@ -138,8 +121,6 @@ The "BCC" ('blind carbon copy') field is reserved for the anonymous recipients o - Datatype: `string` - Default Value: `None` - - ### Subject The email subject @@ -148,8 +129,6 @@ The email subject - Datatype: `string` - Default Value: `Dataset` - - ### Message The email text message @@ -158,8 +137,6 @@ The email text message - Datatype: `code-html` - Default Value: `None` - - ### With HTML formatting When enabled, the email text message will be HTML formatted. Otherwise, it's treated as plain text. @@ -168,8 +145,6 @@ When enabled, the email text message will be HTML formatted. Otherwise, it's tre - Datatype: `boolean` - Default Value: `false` - - ### With attachment If enabled a file from the input is attached to the email. A single input to this operator is expected that provides a file, e.g. a file based dataset (XML, JSON etc.). @@ -178,8 +153,6 @@ If enabled a file from the input is attached to the email. A single input to thi - Datatype: `boolean` - Default Value: `false` - - ### Force SSL When enabled a SSL/TLS connection will be forced from the start without negotiation with the server. Not to be confused with STARTTLS which upgrades an insecure connection to a SSL/TLS connection, which is done by default. @@ -188,8 +161,6 @@ When enabled a SSL/TLS connection will be forced from the start without negotiat - Datatype: `boolean` - Default Value: `false` - - ### Read email properties from input When enabled this allows to send multiple emails. All email configurations are input via the first operator input with each entry representing a different email. The optional second input can be a file based dataset for the attachment. Email parameters that can be overwritten are: from, receiver, cc, bcc, subject and message. 
@@ -198,8 +169,6 @@ When enabled this allows to send multiple emails. All email configurations are i - Datatype: `boolean` - Default Value: `false` - - ### Delay between emails (ms) The delay in milliseconds between sending two consecutive emails. This applies to the retry mechanism, but also to sending multiple emails. @@ -208,10 +177,6 @@ The delay in milliseconds between sending two consecutive emails. This applies t - Datatype: `int` - Default Value: `2` - - - - ## Advanced Parameter ### Timeout @@ -222,8 +187,6 @@ Timeout in milliseconds to establish a connection or wait for a server response. - Datatype: `int` - Default Value: `10000` - - ### Number of retries The number of retries per email when send errors are encountered. @@ -231,6 +194,3 @@ The number of retries per email when send errors are encountered. - ID: `nrRetries` - Datatype: `int` - Default Value: `2` - - - diff --git a/docs/build/reference/customtask/SparkFunction.md b/docs/build/reference/customtask/SparkFunction.md index 74ab1ce8b..9197aeadc 100644 --- a/docs/build/reference/customtask/SparkFunction.md +++ b/docs/build/reference/customtask/SparkFunction.md @@ -8,15 +8,12 @@ tags: # Execute Spark function - - Applies a specified Scala function to a specified field. Example: Let the input field be `"name"`, the inputFunction `"""any => "Arrrrgh!"""`, and the alias `xxx`. In this example, a Spark query corresponding to `SELECT existingField1, existingField2, ... "Arrrrgh!" as "xxx"` will be generated. If the `alias` parameter is empty, the input field will be overwritten. Otherwise, a new field will be added and the rest of the schema stays the same. - ## Parameter ### Function @@ -27,8 +24,6 @@ Scala function expression. - Datatype: `multiline string` - Default Value: `None` - - ### Input field Input field. @@ -37,8 +32,6 @@ Input field. - Datatype: `string` - Default Value: `None` - - ### Alias Alias. @@ -47,10 +40,6 @@ Alias. 
- Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/Template.md b/docs/build/reference/customtask/Template.md index b2710aad2..8dbf6b24b 100644 --- a/docs/build/reference/customtask/Template.md +++ b/docs/build/reference/customtask/Template.md @@ -8,9 +8,6 @@ tags: # Evaluate template - - - The template operator supports the Jinja templating language. Documentation about Jinja can be found in the official [Template Designer Documentation](https://jinja.palletsprojects.com/en/2.11.x/templates/). Note that support for RDF properties is limited, because Jinja does not support some special characters (in particula colons) in variable names. This makes it impractical to access RDF properties. For this reason, the transformation that precedes the template operator needs to make sure that it generates attributes that are valid Jinja variable names. @@ -26,7 +23,7 @@ For each input entity, a output entity is generated that provides a single outpu If 'full evaluation' is enabled, the entire input set will be evaluated at once. -The entities variable will contain all input entities and can be iterated over: +The entities variable will contain all input entities and can be iterated over: {% for entity in entities %} {{entity.property}} @@ -47,7 +44,6 @@ Example iterating over an sequence of books that each contains a list of chapter In this example, the child mapping defines a `chapter` target property from which it is accessible from the root entities. If the child mapping allows multiple entities, the value of the property will be a list of entities. - ## Parameter ### Template @@ -58,8 +54,6 @@ The template - Datatype: `template` - Default Value: `None` - - ### Language The template language. Currently, Jinja is supported. @@ -68,8 +62,6 @@ The template language. Currently, Jinja is supported. 
- Datatype: `string` - Default Value: `jinja` - - ### Output attribute The attribute in the output that will hold the evaluated template. @@ -78,8 +70,6 @@ The attribute in the output that will hold the evaluated template. - Datatype: `string` - Default Value: `output` - - ### Full evaluation If enabled, the entire input set will be evaluated at once. The template will receive a hierarchical 'entities' variable that can be iterated over. A single output entity will be generated that contains the evaluated template. @@ -88,8 +78,6 @@ If enabled, the entire input set will be evaluated at once. The template will re - Datatype: `boolean` - Default Value: `false` - - ### Forward input attributes If true, the input attributes will be forwarded to the output. @@ -98,10 +86,6 @@ If true, the input attributes will be forwarded to the output. - Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/Unpivot.md b/docs/build/reference/customtask/Unpivot.md index 6427e419d..32a0ab22e 100644 --- a/docs/build/reference/customtask/Unpivot.md +++ b/docs/build/reference/customtask/Unpivot.md @@ -8,11 +8,8 @@ tags: # Unpivot - - Given a list of table columns, transforms those columns into attribute-value pairs. - ## Parameter ### First pivot property @@ -23,8 +20,6 @@ The name of the first pivot column in the range. - Datatype: `string` - Default Value: `None` - - ### Last pivot property the name of the last pivot column in the range. If left empty, all columns starting with the first pivot column are used. @@ -33,8 +28,6 @@ the name of the last pivot column in the range. If left empty, all columns start - Datatype: `string` - Default Value: `None` - - ### Attribute property The URI of the output column used to hold the attribute. @@ -43,8 +36,6 @@ The URI of the output column used to hold the attribute. 
- Datatype: `string` - Default Value: `attribute` - - ### Value property The URI of the output column used to hold the value. @@ -53,8 +44,6 @@ The URI of the output column used to hold the value. - Datatype: `string` - Default Value: `value` - - ### Pivot columns Comma separated list of pivot column names. This property will override all inferred columns of the first two arguments. @@ -63,10 +52,6 @@ Comma separated list of pivot column names. This property will override all infe - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/XmlParserOperator.md b/docs/build/reference/customtask/XmlParserOperator.md index 408cd7539..f85693747 100644 --- a/docs/build/reference/customtask/XmlParserOperator.md +++ b/docs/build/reference/customtask/XmlParserOperator.md @@ -8,11 +8,8 @@ tags: # Parse XML - - Takes exactly one input and reads either the defined inputPath or the first value of the first entity as XML document. Then executes the given output entity schema similar to the XML dataset to construct the result entities. - ## Parameter ### Input path @@ -23,8 +20,6 @@ The Silk path expression of the input entity that contains the XML document. If - Datatype: `string` - Default Value: `None` - - ### Base path The path to the elements to be read, starting from the root element, e.g., '/Persons/Person'. If left empty, all direct children of the root element will be read. @@ -33,8 +28,6 @@ The path to the elements to be read, starting from the root element, e.g., '/Per - Datatype: `string` - Default Value: `None` - - ### URI suffix pattern A URI pattern that is relative to the base URI of the input entity, e.g., /{ID}, where {path} may contain relative paths to elements. This relative part is appended to the input entity URI to construct the full URI pattern. 
@@ -43,10 +36,6 @@ A URI pattern that is relative to the base URI of the input entity, e.g., /{ID}, - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/addProjectFiles.md b/docs/build/reference/customtask/addProjectFiles.md index b58d93b1c..8316f77a9 100644 --- a/docs/build/reference/customtask/addProjectFiles.md +++ b/docs/build/reference/customtask/addProjectFiles.md @@ -8,11 +8,8 @@ tags: # Add project files - - Adds file resources to the project that are piped into the input port. - ## Parameter ### File name @@ -23,8 +20,6 @@ File name of the uploaded file(s). If multiple files are uploaded, an index will - Datatype: `string` - Default Value: `None` - - ### Directory Directory to which the files should be written. If left empty, the files will be uploaded to the project root directory. Note that all files will be written to this directory even if they have been read from a different project directory initially. @@ -33,8 +28,6 @@ Directory to which the files should be written. If left empty, the files will be - Datatype: `string` - Default Value: `None` - - ### Overwrite strategy The strategy to use if a file with the same name already exists. @@ -43,10 +36,6 @@ The strategy to use if a file with the same name already exists. - Datatype: `enumeration` - Default Value: `fail` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/cmem-plugin-jq-workflow.md b/docs/build/reference/customtask/cmem-plugin-jq-workflow.md index da9ac73d7..2b4a4356e 100644 --- a/docs/build/reference/customtask/cmem-plugin-jq-workflow.md +++ b/docs/build/reference/customtask/cmem-plugin-jq-workflow.md @@ -15,14 +15,13 @@ tags: In order to use it, you need to install it, e.g. with cmemc. 
- > [jq](https://jqlang.org/) is like sed for JSON data - you can use it to > slice and filter and map and transform structured data with the same ease that sed, awk, > grep and friends let you play with text. In order to test jq expressions, you can use [play.jqlang.org](https://play.jqlang.org/). -## Basic concepts: +## Basic concepts - Filters separated by a comma will produce multiple independent outputs: `,` - Will ignores error if the type is unexpected: `?` @@ -74,8 +73,6 @@ Types can be `arrays`, `objects`, `iterables`, `booleans`, `numbers`, `normals`, - Remove duplicates: `unique` or `unique_by(.foo)` or `unique_by(length)` - Reverse an array: `reverse` - - ## Parameter ### jq Expression @@ -86,19 +83,10 @@ The jq program to apply to the input JSON string. - Datatype: `string` - Default Value: `.` - - - - ## Advanced Parameter ### JSON source which can be used with the validate expression action - - - ID: `validation_source` - Datatype: `code-json` - Default Value: `None` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_auth-workflow-auth-OAuth2.md b/docs/build/reference/customtask/cmem_plugin_auth-workflow-auth-OAuth2.md index d77518de5..cf5bc10b1 100644 --- a/docs/build/reference/customtask/cmem_plugin_auth-workflow-auth-OAuth2.md +++ b/docs/build/reference/customtask/cmem_plugin_auth-workflow-auth-OAuth2.md @@ -25,7 +25,6 @@ Note: The consuming task needs to have the parameter `oauth_access_token` in ord to use the output this task. You need to connect this task to the **config port** of the consuming task. - ## Parameter ### Grant Type @@ -36,8 +35,6 @@ Select the used OAuth Grant Type in order to specify how this plugin gets a vali - Datatype: `string` - Default Value: `client_credentials` - - ### Token Endpoint This is the OpenID Connect (OIDC) OAuth 2.0 token endpoint location (a HTTP(S) URL). 
@@ -46,8 +43,6 @@ This is the OpenID Connect (OIDC) OAuth 2.0 token endpoint location (a HTTP(S) U - Datatype: `string` - Default Value: `None` - - ### Client ID The Client ID obtained during registration. @@ -56,8 +51,6 @@ The Client ID obtained during registration. - Datatype: `string` - Default Value: `None` - - ### Client Secret The Client Secret obtained during registration. @@ -66,8 +59,6 @@ The Client Secret obtained during registration. - Datatype: `string` - Default Value: `None` - - ### Username The user account name used for authentication. @@ -76,8 +67,6 @@ The user account name used for authentication. - Datatype: `string` - Default Value: `None` - - ### Password The user account password. @@ -86,10 +75,6 @@ The user account password. - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/cmem_plugin_graph_insights-Update.md b/docs/build/reference/customtask/cmem_plugin_graph_insights-Update.md index c2fd760e1..ae62433a1 100644 --- a/docs/build/reference/customtask/cmem_plugin_graph_insights-Update.md +++ b/docs/build/reference/customtask/cmem_plugin_graph_insights-Update.md @@ -35,7 +35,6 @@ This workflow task updates [Graph Insights](https://go.eccenca.com/feature/explo - Graph Insights must be active in your system - User must have permissions to access Graph Insights - The plugin will skip execution with a warning if these conditions are not met - ## Parameter @@ -47,10 +46,6 @@ Selected graph to update snapshots for. Leave empty for updating all snapshots. - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter ### Timeout @@ -60,6 +55,3 @@ Timeout in seconds for Graph Insights API. 
- ID: `timeout` - Datatype: `double` - Default Value: `100` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_graphql-workflow-graphql-GraphQLPlugin.md b/docs/build/reference/customtask/cmem_plugin_graphql-workflow-graphql-GraphQLPlugin.md index fe32dbd52..84a51de24 100644 --- a/docs/build/reference/customtask/cmem_plugin_graphql-workflow-graphql-GraphQLPlugin.md +++ b/docs/build/reference/customtask/cmem_plugin_graphql-workflow-graphql-GraphQLPlugin.md @@ -20,7 +20,6 @@ This workflow task performs GraphQL operations by sending in the GraphQL query using, Jinja queries and Jinja variables, which can be obtained from entities. The result of the query is saved as a JSON document in a pre-created JSON dataset. - ## Parameter @@ -32,8 +31,6 @@ The URL of the GraphQL endpoint you want to query. A collective list of public G - Datatype: `string` - Default Value: `None` - - ### Query The query text of the GraphQL Query you want to execute. GraphQL is a query language for APIs and a runtime for fulfilling those queries with your existing data. Learn more on GraphQL [here](https://graphql.org/). Example Query: query allFruits { fruits { id scientific_name tree_name fruit_name family origin description climatic_zone } } @@ -42,8 +39,6 @@ The query text of the GraphQL Query you want to execute. GraphQL is a query lang - Datatype: `multiline string` - Default Value: `None` - - ### Query variables Pass dynamic variables when making a query or mutation. Example Variables: {"id" : 1} @@ -52,10 +47,6 @@ Pass dynamic variables when making a query or mutation. Example Variables: {"id" - Datatype: `multiline string` - Default Value: `{}` - - - - ## Advanced Parameter ### Target JSON Dataset @@ -66,8 +57,6 @@ The Dataset where this task will save the JSON results. - Datatype: `string` - Default Value: `None` - - ### OAuth access token Access token that connects to a GraphQL endpoint to authorize and secure user access to resources and data. 
@@ -75,6 +64,3 @@ Access token that connects to a GraphQL endpoint to authorize and secure user ac - ID: `oauth_access_token` - Datatype: `string` - Default Value: `None` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_irdi-workflow-irdi_plugin-IrdiPlugin.md b/docs/build/reference/customtask/cmem_plugin_irdi-workflow-irdi_plugin-IrdiPlugin.md index ce18d15a4..586798d86 100644 --- a/docs/build/reference/customtask/cmem_plugin_irdi-workflow-irdi_plugin-IrdiPlugin.md +++ b/docs/build/reference/customtask/cmem_plugin_irdi-workflow-irdi_plugin-IrdiPlugin.md @@ -22,13 +22,12 @@ IRDIs are unique for each combination of (non-advanced) parameters. If no input path is configured, values are read from the URIs of the input (Transformation Input). - All fields of the IRDI are configurable, except `Item Code`, which is created by the plugin. - - Created IRDIs are unique per configuration. + - Created IRDIs are unique per configuration. - Specify a graph that stores the state of Item Codes. - Input and output paths are configurable. - - if no input path is configured, values are read from the URIs of the input + - if no input path is configured, values are read from the URIs of the input (transformation input). 
- ## Parameter ### Counter graph @@ -39,68 +38,42 @@ Graph in which the Item Code (IC) counter is stored - Datatype: `string` - Default Value: `None` - - ### International Code Designator (ICD): Numeric, 4 characters - - - ID: `icd` - Datatype: `string` - Default Value: `None` - - ### Organization Identifier (OI): Numeric, 4 characters - - - ID: `oi` - Datatype: `string` - Default Value: `None` - - ### Organization Part Identifier (OPI): Alphanumeric, up to 35 characters (base36) - - - ID: `opi` - Datatype: `string` - Default Value: `None` - - ### OPI Source Indicator (OPIS): Numeric, 1 character - - - ID: `opis` - Datatype: `string` - Default Value: `None` - - ### Additional information (AI): Numeric, 4 characters - - - ID: `ai` - Datatype: `string` - Default Value: `None` - - ### Code-space identifier (CSI): Alphanumeric, 2 character (base36) - - - ID: `csi` - Datatype: `string` - Default Value: `None` - - ### Output path / property Path or property that will connect input values and their generated IRDIs @@ -109,10 +82,6 @@ Path or property that will connect input values and their generated IRDIs - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter ### Counted object @@ -123,8 +92,6 @@ The class of objects that are counted. (IRI) - Datatype: `string` - Default Value: `None` - - ### Input Schema Path / Property Path from which input values are taken. If empty, values are read from the URIs of the input @@ -132,6 +99,3 @@ Path from which input values are taken. If empty, values are read from the URIs - ID: `input_schema_path` - Datatype: `string` - Default Value: `None` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_jira-JqlQuery.md b/docs/build/reference/customtask/cmem_plugin_jira-JqlQuery.md index 89403bf3f..c42d05ce4 100644 --- a/docs/build/reference/customtask/cmem_plugin_jira-JqlQuery.md +++ b/docs/build/reference/customtask/cmem_plugin_jira-JqlQuery.md @@ -27,39 +27,28 @@ where you should connect a JSON Dataset. 
Note that you need to create an [API token](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/) for your Atlassian account, to access the API of your atlassian.net hosted Jira instance. - ## Parameter ### Jira Server -Base URL of the jira service, e.g. 'https://jira.example.org' +Base URL of the jira service, e.g. '' - ID: `base_url` - Datatype: `string` - Default Value: `None` - - ### Account - - - ID: `username` - Datatype: `string` - Default Value: `None` - - ### Password or Token - - - ID: `password` - Datatype: `password` - Default Value: `None` - - ### JQL Query Warning: An empty query string retrieves all issues. @@ -68,8 +57,6 @@ Warning: An empty query string retrieves all issues. - Datatype: `string` - Default Value: `None` - - ### Limit Maximum number of issues to retrieve (0 = retrieve all issues). @@ -78,22 +65,14 @@ Maximum number of issues to retrieve (0 = retrieve all issues). - Datatype: `Long` - Default Value: `0` - - - - ## Advanced Parameter ### Verify SSL Connection - - - ID: `ssl_verify` - Datatype: `boolean` - Default Value: `true` - - ### Connection Timeout Number of seconds, the plugin will wait to establish a connection to the Jira Service. @@ -102,8 +81,6 @@ Number of seconds, the plugin will wait to establish a connection to the Jira Se - Datatype: `Long` - Default Value: `300` - - ### Results per Page Number of items to return per request. @@ -111,6 +88,3 @@ Number of items to return per request. - ID: `results_per_page` - Datatype: `Long` - Default Value: `100` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_kafka-ReceiveMessages.md b/docs/build/reference/customtask/cmem_plugin_kafka-ReceiveMessages.md index 5a8f783a8..629e6e5c8 100644 --- a/docs/build/reference/customtask/cmem_plugin_kafka-ReceiveMessages.md +++ b/docs/build/reference/customtask/cmem_plugin_kafka-ReceiveMessages.md @@ -15,7 +15,6 @@ tags: In order to use it, you need to install it, e.g. with cmemc. 
- This workflow operator uses the Kafka Consumer API to receive messages from an [Apache Kafka](https://kafka.apache.org/) topic. @@ -112,7 +111,6 @@ In this case, a sample response from the consumer will appear as follows:
- ## Parameter ### Messages Dataset @@ -123,8 +121,6 @@ Where do you want to save the messages? The dropdown lists usable datasets from - Datatype: `string` - Default Value: `None` - - ### Bootstrap Server This is URL of one of the Kafka brokers. The task fetches the initial metadata about your Kafka cluster from this URL. @@ -133,8 +129,6 @@ This is URL of one of the Kafka brokers. The task fetches the initial metadata a - Datatype: `string` - Default Value: `None` - - ### Security Protocol Which security mechanisms need to be applied to connect? Use PLAINTEXT in case you connect to a plain Kafka, which is available inside your VPN. Use SASL in case you connect to a [confluent.cloud](https://confluent.cloud) cluster (then you also need to specify your SASL credentials in the advanced options section). @@ -143,8 +137,6 @@ Which security mechanisms need to be applied to connect? Use PLAINTEXT in case y - Datatype: `string` - Default Value: `PLAINTEXT` - - ### Topic The name of the category/feed where messages were published. @@ -153,22 +145,14 @@ The name of the category/feed where messages were published. - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter ### SASL Mechanisms - - - ID: `sasl_mechanisms` - Datatype: `string` - Default Value: `PLAIN` - - ### SASL Account The account identifier for the SASL authentication. In case you are using a [confluent.cloud](https://confluent.cloud) cluster, this is the API key. @@ -177,8 +161,6 @@ The account identifier for the SASL authentication. In case you are using a [con - Datatype: `string` - Default Value: `None` - - ### SASL Password The credentials for the SASL Account. In case you are using a [confluent.cloud](https://confluent.cloud) cluster, this is the API secret. @@ -187,8 +169,6 @@ The credentials for the SASL Account. 
In case you are using a [confluent.cloud]( - Datatype: `password` - Default Value: `None` - - ### Auto Offset Reset What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server (e.g. because that data has been deleted). - `earliest` will fetch the whole topic beginning from the oldest record. - `latest` will receive nothing but will get any new records on the next run. @@ -197,8 +177,6 @@ What to do when there is no initial offset in Kafka or if the current offset doe - Datatype: `string` - Default Value: `latest` - - ### Consumer Group Name When a topic is consumed by consumers in the same group, every record will be delivered to only one consumer of that group. If all the consumers of a topic are labeled the same consumer group, then the records will effectively be load-balanced over these consumers. If all the consumer of a topic are labeled different consumer groups, then each record will be broadcast to all the consumers. When the Group Id field is empty, the plugin defaults to DNS:PROJECT ID:TASK ID. @@ -207,8 +185,6 @@ When a topic is consumed by consumers in the same group, every record will be de - Datatype: `string` - Default Value: `None` - - ### Client Id An optional identifier of a Kafka client (producer/consumer) that is passed to a Kafka broker with every request. The sole purpose of this is to be able to track the source of requests beyond just ip and port by allowing a logical application name to be included in Kafka logs and monitoring aggregates. When the Client Id field is empty, the plugin defaults to DNS:PROJECT ID:TASK ID. @@ -217,8 +193,6 @@ An optional identifier of a Kafka client (producer/consumer) that is passed to a - Datatype: `string` - Default Value: `None` - - ### Local Consumer Queue Size Maximum total message size in kilobytes that the consumer can buffer for a specific partition. The consumer will stop fetching from the partition if it hits this limit. 
This helps prevent consumers from running out of memory. @@ -227,8 +201,6 @@ Maximum total message size in kilobytes that the consumer can buffer for a speci - Datatype: `Long` - Default Value: `5000` - - ### Message Limit The maximum number of messages to fetch and process in each run. If 0 or less, all messages will be fetched. @@ -237,8 +209,6 @@ The maximum number of messages to fetch and process in each run. If 0 or less, a - Datatype: `Long` - Default Value: `100000` - - ### Disable Commit Setting this to true will disable committing messages after retrival. This means you will receive the same messages on the next execution (for debugging). @@ -246,6 +216,3 @@ Setting this to true will disable committing messages after retrival. This means - ID: `disable_commit` - Datatype: `boolean` - Default Value: `false` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_kafka-SendMessages.md b/docs/build/reference/customtask/cmem_plugin_kafka-SendMessages.md index a4e6d1978..e65bad336 100644 --- a/docs/build/reference/customtask/cmem_plugin_kafka-SendMessages.md +++ b/docs/build/reference/customtask/cmem_plugin_kafka-SendMessages.md @@ -28,7 +28,6 @@ on configuration. An example XML document is shown below. This document will be sent as two messages to the configured topic. Each message is created as a proper XML document. - ```xml @@ -58,7 +57,6 @@ on configuration. An example JSON document is shown below. This document will be sent as two messages to the configured topic. Each message is created as a proper JSON document. - ```json [ { @@ -105,8 +103,6 @@ on configuration.
- - ## Parameter ### Messages Dataset @@ -117,8 +113,6 @@ Where do you want to retrieve the messages from? The dropdown lists usable datas - Datatype: `string` - Default Value: `None` - - ### Bootstrap Server This is URL of one of the Kafka brokers. The task fetches the initial metadata about your Kafka cluster from this URL. @@ -127,8 +121,6 @@ This is URL of one of the Kafka brokers. The task fetches the initial metadata a - Datatype: `string` - Default Value: `None` - - ### Security Protocol Which security mechanisms need to be applied to connect? Use PLAINTEXT in case you connect to a plain Kafka, which is available inside your VPN. Use SASL in case you connect to a [confluent.cloud](https://confluent.cloud) cluster (then you also need to specify your SASL credentials in the advanced options section). @@ -137,8 +129,6 @@ Which security mechanisms need to be applied to connect? Use PLAINTEXT in case y - Datatype: `string` - Default Value: `PLAINTEXT` - - ### Topic The name of the category/feed to which the messages will be published. Note that you may create this topic in advance before publishing messages to it. This is especially true for a kafka cluster hosted at [confluent.cloud](https://confluent.cloud). @@ -147,22 +137,14 @@ The name of the category/feed to which the messages will be published. Note that - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter ### SASL Mechanisms - - - ID: `sasl_mechanisms` - Datatype: `string` - Default Value: `PLAIN` - - ### SASL Account The account identifier for the SASL authentication. In case you are using a [confluent.cloud](https://confluent.cloud) cluster, this is the API key. @@ -171,8 +153,6 @@ The account identifier for the SASL authentication. In case you are using a [con - Datatype: `string` - Default Value: `None` - - ### SASL Password The credentials for the SASL Account. In case you are using a [confluent.cloud](https://confluent.cloud) cluster, this is the API secret. 
@@ -181,8 +161,6 @@ The credentials for the SASL Account. In case you are using a [confluent.cloud]( - Datatype: `password` - Default Value: `None` - - ### Client Id An optional identifier of a Kafka client (producer/consumer) that is passed to a Kafka broker with every request. The sole purpose of this is to be able to track the source of requests beyond just ip and port by allowing a logical application name to be included in Kafka logs and monitoring aggregates. When the Client Id field is empty, the plugin defaults to DNS:PROJECT ID:TASK ID. @@ -191,8 +169,6 @@ An optional identifier of a Kafka client (producer/consumer) that is passed to a - Datatype: `string` - Default Value: `None` - - ### Maximum Message Size The maximum size of a request message in bytes. This is also effectively a cap on the maximum record size. Note that the server has its own cap on record size which may be different from this. @@ -201,8 +177,6 @@ The maximum size of a request message in bytes. This is also effectively a cap o - Datatype: `Long` - Default Value: `1048576` - - ### Compression Type The compression type for all data generated by the producer. The default is none (i.e. no compression). @@ -210,6 +184,3 @@ The compression type for all data generated by the producer. The default is none - ID: `compression_type` - Datatype: `string` - Default Value: `none` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_kubernetes-Execute.md b/docs/build/reference/customtask/cmem_plugin_kubernetes-Execute.md index de8a8b338..6ab16a826 100644 --- a/docs/build/reference/customtask/cmem_plugin_kubernetes-Execute.md +++ b/docs/build/reference/customtask/cmem_plugin_kubernetes-Execute.md @@ -15,15 +15,14 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - This plugin enables execution of commands inside Kubernetes pods and captures their output. 
## Features - Supports multiple connection types: - - **In-cluster**: Uses the service account kubernetes gives to pods + - **In-cluster**: Uses the service account kubernetes gives to pods (for plugins running inside k8s) - - **Explicit config**: Uses a YAML kubeconfig file for external connections + - **Explicit config**: Uses a YAML kubeconfig file for external connections - Executes shell commands in specified pods within namespaces - Captures both stdout and stderr output - Returns command output as a file entity for further processing @@ -41,7 +40,6 @@ downstream workflow tasks. - Executing maintenance scripts from within or outside the cluster - Gathering system information and logs - Performing health checks and troubleshooting - ## Parameter @@ -53,8 +51,6 @@ The type of configuration you wish to use. - Datatype: `string` - Default Value: `explicit` - - ### Namespace Namespaces provide a mechanism for isolating groups of resources. @@ -63,8 +59,6 @@ Namespaces provide a mechanism for isolating groups of resources. - Datatype: `string` - Default Value: `None` - - ### Pod Pods are an abstraction that represent groups of one or more application containers (such as Docker), and some shared resources for those containers. @@ -73,8 +67,6 @@ Pods are an abstraction that represent groups of one or more application contain - Datatype: `string` - Default Value: `None` - - ### Container In case there is more than one container in the pod OR the default container selection does not work, you need to specify the container ID in addition to the pod ID. @@ -83,8 +75,6 @@ In case there is more than one container in the pod OR the default container sel - Datatype: `string` - Default Value: `None` - - ### Command The command to execute. @@ -93,10 +83,6 @@ The command to execute. - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter ### Kube Config @@ -106,6 +92,3 @@ YAML source code of the kube config. 
- ID: `kube_config` - Datatype: `code-yaml` - Default Value: `None` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md b/docs/build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md index 94cc7864d..ee591289f 100644 --- a/docs/build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md +++ b/docs/build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md @@ -15,7 +15,6 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - This plugin creates vector embeddings from text data using an OpenAI compatible embeddings API. It processes input entities containing text data and generates high-dimensional vector representations that capture semantic meaning. @@ -51,8 +50,6 @@ The base URL of the OpenAI compatible API (without endpoint path). - Datatype: `string` - Default Value: `https://api.openai.com/v1/` - - ### API Type Select the API client type. This determines the authentication method and endpoint configuration used for API requests. Choose `OPENAI` for direct OpenAI API access or `AZURE_OPENAI` for Azure-hosted OpenAI services. Consider using the API version advanced parameter in case you access Azure-hosted OpenAI services. @@ -61,8 +58,6 @@ Select the API client type. This determines the authentication method and endpoi - Datatype: `enumeration` - Default Value: `OPENAI` - - ### API key An optional API key for authentication. @@ -71,8 +66,6 @@ An optional API key for authentication. - Datatype: `password` - Default Value: `None` - - ### Embeddings model The identifier of the embeddings model to use. Available model IDs for some public providers can be found here: [Claude](https://docs.claude.com/en/docs/build-with-claude/embeddings#available-models), [OpenAI](https://platform.openai.com/docs/guides/embeddings#embedding-models). @@ -81,8 +74,6 @@ The identifier of the embeddings model to use. 
Available model IDs for some publ - Datatype: `string` - Default Value: `text-embedding-3-small` - - ### Embedding entity paths (comma-separated list) Changing this value will change, which input paths are used by the workflow task to calculate embeddings. A blank value means, all paths are used. @@ -91,8 +82,6 @@ Changing this value will change, which input paths are used by the workflow task - Datatype: `string` - Default Value: `text` - - ### Forward entity paths (comma-separated list) Paths from input entities to forward to output without modification. These paths will be passed through unchanged alongside embeddings. @@ -101,10 +90,6 @@ Paths from input entities to forward to output without modification. These paths - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter ### API Version @@ -115,8 +100,6 @@ Azure OpenAI API version (only used when API Type is `AZURE_OPENAI`). For more i - Datatype: `string` - Default Value: `None` - - ### Timeout (milliseconds) The timeout for a single API request in milliseconds. @@ -125,8 +108,6 @@ The timeout for a single API request in milliseconds. - Datatype: `Long` - Default Value: `10000` - - ### Entries Processing Buffer How many input values do you want to send per request? @@ -135,8 +116,6 @@ How many input values do you want to send per request? - Datatype: `Long` - Default Value: `100` - - ### Entity Embedding text (output) Changing this value will change the output schema accordingly. Default: _embedding_source @@ -145,8 +124,6 @@ Changing this value will change the output schema accordingly. Default: _embeddi - Datatype: `string` - Default Value: `_embedding_source` - - ### Entity Embedding path (output) Changing this value will change the output schema accordingly. Default: _embedding @@ -154,6 +131,3 @@ Changing this value will change the output schema accordingly. 
Default: _embeddi - ID: `embedding_output_path` - Datatype: `string` - Default Value: `_embedding` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md b/docs/build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md index 064413dc3..8a07710ec 100644 --- a/docs/build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md +++ b/docs/build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md @@ -15,7 +15,6 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - ## Overview This plugin executes Large Language Model (LLM) instructions over entity collections, enabling @@ -80,12 +79,14 @@ class StructuredOutput(BaseModel): ## Performance Features Parallel Processing: + - **Concurrent Requests**: Configurable semaphore-controlled API calls - **Batch Processing**: Entities processed in configurable batch sizes - **Rate Limiting**: Optional delays between requests - **Memory Optimization**: Streaming processing with generator patterns Error Handling: + - **Graceful Degradation**: Continue processing on API errors (configurable) - **Detailed Logging**: Comprehensive error reporting and debugging information - **Workflow Integration**: Proper cancellation support and progress reporting @@ -93,11 +94,13 @@ Error Handling: ## API Compatibility Supported Providers: + - **OpenAI**: Direct API access with full feature support - **Azure OpenAI**: Enterprise Azure-hosted services with API versioning - **OpenAI-Compatible**: Anthropic Claude, OpenRouter, local models, and other compatible endpoints Authentication: + - **API Keys**: Secure password-type parameters for API authentication - **Azure Integration**: Supports Azure OpenAI API versioning and endpoint configuration - **Flexible Endpoints**: Custom base URLs for various providers @@ -105,6 +108,7 @@ Authentication: ## Advanced Configuration ### Message Templates + Customize the conversation structure beyond simple prompts: ```json @@ -115,6 +119,7 @@ 
Customize the conversation structure beyond simple prompts: ``` ### Performance Tuning + - **Temperature Control**: Adjust creativity vs. determinism (0.0-2.0) - **Timeout Management**: Request-level timeout configuration - **Concurrency Limits**: Prevent rate limiting with request throttling @@ -130,7 +135,6 @@ Customize the conversation structure beyond simple prompts: For detailed prompting guidance, see [OpenAI's Text Generation Guide](https://platform.openai.com/docs/guides/text?api-mode=chat). - ## Parameter ### Base URL @@ -141,8 +145,6 @@ The base URL of the OpenAI compatible API (without endpoint path). - Datatype: `string` - Default Value: `https://api.openai.com/v1/` - - ### API Type Select the API client type. This determines the authentication method and endpoint configuration used for API requests. Choose `OPENAI` for direct OpenAI API access or `AZURE_OPENAI` for Azure-hosted OpenAI services. Consider using the API version advanced parameter in case you access Azure-hosted OpenAI services. @@ -151,8 +153,6 @@ Select the API client type. This determines the authentication method and endpoi - Datatype: `enumeration` - Default Value: `OPENAI` - - ### API key An optional API key for authentication. @@ -161,8 +161,6 @@ An optional API key for authentication. - Datatype: `password` - Default Value: `None` - - ### Instruct Model The identifier of the instruct model to use. Note that some provider do not support a model list endpoint. Just create a custom entry then. Available model IDs for some public providers can be found here: [OpenAI](https://platform.openai.com/docs/models), [Claude](https://docs.claude.com/en/docs/about-claude/models/overview), [OpenRouter](https://openrouter.ai/models), [Azure](https://learn.microsoft.com/en-us/azure/ai-foundry/foundry-models/concepts/models-sold-directly-by-azure). **Note:** For STRUCTURED_OUTPUT format, only certain models support structured outputs. 
See [OpenAI Structured Outputs Guide](https://platform.openai.com/docs/guides/structured-outputs) for supported models. @@ -171,8 +169,6 @@ The identifier of the instruct model to use. Note that some provider do not supp - Datatype: `string` - Default Value: `gpt-4o-mini` - - ### Instruction Prompt Template The instruction prompt template. Please have a look at the task documentation for detailed instructions. @@ -181,10 +177,6 @@ The instruction prompt template. Please have a look at the task documentation fo - Datatype: `code-jinja2` - Default Value: `Write a paragraph about this entity: {{ entity }}` - - - - ## Advanced Parameter ### API Version @@ -195,8 +187,6 @@ Azure OpenAI API version (only used when API Type is `AZURE_OPENAI`). For more i - Datatype: `string` - Default Value: `None` - - ### Temperature (between 0 and 2) A parameter that controls the randomness and creativity of the model. A high temperature value (`0.8` - `1.0`) increases randomness and creativity. This is useful for open-ended tasks like storytelling or brainstorming. A low temperature value (`0.0` - `0.4`) produces more deterministic and focused outputs. This is suitable for factual or technical tasks. @@ -205,8 +195,6 @@ A parameter that controls the randomness and creativity of the model. A high tem - Datatype: `double` - Default Value: `1.0` - - ### Timeout (seconds) The timeout for a single API request in seconds. @@ -215,8 +203,6 @@ The timeout for a single API request in seconds. - Datatype: `double` - Default Value: `300` - - ### Instruction Output Path The entity path where the instruction result will be provided. Note: This parameter is not used when Output Format is set to STRUCTURED_OUTPUT. For structured outputs, only the Pydantic model fields are included in the output schema. @@ -225,8 +211,6 @@ The entity path where the instruction result will be provided. 
Note: This parame - Datatype: `string` - Default Value: `_instruction_output` - - ### Messages Template A list of messages comprising the conversation compatible with OpenAI chat completion API message object. Have look at [Message roles and instruction following](https://platform.openai.com/docs/guides/text#message-roles-and-instruction-following) to learn about different levels of priority to messages with different roles. @@ -234,6 +218,7 @@ A list of messages comprising the conversation compatible with OpenAI chat compl - ID: `messages_template` - Datatype: `code-json` - Default Value: + ``` json [ { @@ -247,8 +232,6 @@ A list of messages comprising the conversation compatible with OpenAI chat compl ] ``` - - ### Output Format Specifying the format that the model must output. Possible values are `TEXT` - Standard text output, `STRUCTURED_OUTPUT` - output follows a given schema. Add your schema as Pydantic model in the parameter below, `JSON_MODE` - a more basic version of the structured outputs feature where you have to add your structure to the prompt template. @@ -257,8 +240,6 @@ Specifying the format that the model must output. Possible values are `TEXT` - S - Datatype: `enumeration` - Default Value: `TEXT` - - ### Pydantic Schema The Pydantic schema definition with a mandatory class named `StructuredOutput(BaseModel)`. This is only used in combination with the Structured Output format. A schema may have up to 100 object properties total, with up to 5 levels of nesting. The total string length of all property names, definition names, enum values, and const values cannot exceed 15,000 characters. @@ -266,6 +247,7 @@ The Pydantic schema definition with a mandatory class named `StructuredOutput(Ba - ID: `pydantic_schema` - Datatype: `code-python` - Default Value: + ``` python from pydantic import BaseModel @@ -276,8 +258,6 @@ class StructuredOutput(BaseModel): ``` - - ### Raise on API errors How to react on API errors. 
When enable, any API errors will cause the workflow to stop with an exception. When disabled, API errors are logged and the error message is written to the entity output, allowing the workflow to continue processing other entities. @@ -286,8 +266,6 @@ How to react on API errors. When enable, any API errors will cause the workflow - Datatype: `boolean` - Default Value: `true` - - ### Maximum Concurrent Requests Maximum number of concurrent API requests to prevent rate limiting and resource exhaustion. @@ -296,8 +274,6 @@ Maximum number of concurrent API requests to prevent rate limiting and resource - Datatype: `Long` - Default Value: `10` - - ### Batch Size Number of entities to process in each batch for memory optimization. @@ -306,8 +282,6 @@ Number of entities to process in each batch for memory optimization. - Datatype: `Long` - Default Value: `100` - - ### Request Delay (seconds) Delay between API requests in seconds to respect rate limits. @@ -315,6 +289,3 @@ Delay between API requests in seconds to respect rate limits. - ID: `request_delay` - Datatype: `double` - Default Value: `0.0` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_loopwf-task-StartWorkflow.md b/docs/build/reference/customtask/cmem_plugin_loopwf-task-StartWorkflow.md index 7dd1bd948..2ada6d81d 100644 --- a/docs/build/reference/customtask/cmem_plugin_loopwf-task-StartWorkflow.md +++ b/docs/build/reference/customtask/cmem_plugin_loopwf-task-StartWorkflow.md @@ -70,7 +70,6 @@ Input schema paths: `label`, `id` → JSON payload: `{ "label": "Example", "id" - Batch operations that require complex per-entity logic encapsulated in a workflow. - Quality checks where each entity must pass through a dedicated validation workflow. - ## Parameter ### Workflow @@ -81,30 +80,18 @@ Which workflow do you want to start per entity. - Datatype: `string` - Default Value: `None` - - ### How many workflow jobs should run in parallel? 
- - - ID: `parallel_execution` - Datatype: `Long` - Default Value: `1` - - ### Forward incoming entities to the output port? - - - ID: `forward_entities` - Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter ### Mime-type for file by file processing (beta) @@ -114,6 +101,3 @@ When working with file entities, setting this to a proper value will send the fi - ID: `input_mime_type` - Datatype: `string` - Default Value: `None` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_mattermost.md b/docs/build/reference/customtask/cmem_plugin_mattermost.md index 851264974..d0c51d068 100644 --- a/docs/build/reference/customtask/cmem_plugin_mattermost.md +++ b/docs/build/reference/customtask/cmem_plugin_mattermost.md @@ -41,19 +41,16 @@ input paths are recognized: - channel - message - ## Parameter ### URL -The base URL of your Mattermost deployment. Example: https://mattermost.example.org +The base URL of your Mattermost deployment. Example: - ID: `url` - Datatype: `string` - Default Value: `None` - - ### Access Token The Personal Access Token of the bot account. @@ -62,8 +59,6 @@ The Personal Access Token of the bot account. - Datatype: `password` - Default Value: `None` - - ### Bot name The name or display name of the bot you want to use to connect. @@ -72,8 +67,6 @@ The name or display name of the bot you want to use to connect. - Datatype: `string` - Default Value: `None` - - ### User The user account which will receive the message. You can search for users if the connection was successful (Base URl, bot + token). @@ -82,8 +75,6 @@ The user account which will receive the message. You can search for users if the - Datatype: `string` - Default Value: `None` - - ### Channel The channel which will receive the message. You can search for channels if the connection was successful (Base URl, bot + token). If you want to send your message to multiple channels, separate them with a comma. 
@@ -92,8 +83,6 @@ The channel which will receive the message. You can search for channels if the c - Datatype: `string` - Default Value: `None` - - ### Message The message size is limited to a configured maximum (e.g. 16383 characters). @@ -102,10 +91,6 @@ The message size is limited to a configured maximum (e.g. 16383 characters). - Datatype: `multiline string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/cmem_plugin_nextcloud-Download.md b/docs/build/reference/customtask/cmem_plugin_nextcloud-Download.md index 649ede01c..a6917aaae 100644 --- a/docs/build/reference/customtask/cmem_plugin_nextcloud-Download.md +++ b/docs/build/reference/customtask/cmem_plugin_nextcloud-Download.md @@ -15,7 +15,6 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - This workflow task downloads files from a specified Nextcloud service instance. Given the URL of the target Nextcloud instance along with your credentials, you can specify any @@ -24,15 +23,16 @@ include or exclude specific files within the selected directory. The files are n project resources, but are only available within the workflow itself. #### Nextcloud List Files input + If this workflow has an input, it will take the data that comes in instead of the selected values. This works only with the schema used in the **Nextcloud List Files** Plugin. Make sure you still add the appropriate URL, identification and token. -#### Important: +#### Important + To establish a secure connection, you must generate a dedicated [app-specific password and username](https://docs.nextcloud.com/server/latest/user_manual/de/session_management.html) in the Security section of your Nextcloud account settings. Do not use your standard login credentials. - ## Parameter @@ -44,8 +44,6 @@ The Base URL of your Nextcloud service, e.g. `https://cloud.example.com`. 
- Datatype: `string` - Default Value: `None` - - ### API identification The identification generated by the app-password function in Nextcloud. @@ -54,8 +52,6 @@ The identification generated by the app-password function in Nextcloud. - Datatype: `string` - Default Value: `None` - - ### API token The token generated by the app-password function in Nextcloud. @@ -64,8 +60,6 @@ The token generated by the app-password function in Nextcloud. - Datatype: `password` - Default Value: `None` - - ### File or directory path The path to a specific directory from which to download files.Includes all the subdirectories. Leave empty or type '/' for root directory.When pasting a directory path, select 'Custom entry:' to trigger autocompletion andcheck if the folder is recognized. @@ -74,8 +68,6 @@ The path to a specific directory from which to download files.Includes all the s - Datatype: `string` - Default Value: `None` - - ### File expression using * A filepath for searching specified files through the given path. E.g. searching for *.txt results in all .txt files under the given directory and its subdirectories. @@ -84,8 +76,6 @@ A filepath for searching specified files through the given path. E.g. searching - Datatype: `string` - Default Value: `None` - - ### Error on empty result A flag indicating weather an empty output will throw an error. @@ -94,9 +84,7 @@ A flag indicating weather an empty output will throw an error. - Datatype: `boolean` - Default Value: `false` - - -### Exclude files in subfolders from download. +### Exclude files in subfolders from download A flag indicating whether files located in subfolders should be excluded from the download. 
@@ -104,10 +92,6 @@ A flag indicating whether files located in subfolders should be excluded from th - Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/cmem_plugin_nextcloud-List.md b/docs/build/reference/customtask/cmem_plugin_nextcloud-List.md index 36ec8af96..e2d46cd30 100644 --- a/docs/build/reference/customtask/cmem_plugin_nextcloud-List.md +++ b/docs/build/reference/customtask/cmem_plugin_nextcloud-List.md @@ -15,7 +15,6 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - This workflow task creates a structured output from a specified Nextcloud service instance. @@ -23,11 +22,11 @@ Given the URL of the target Nextcloud instance along with your credentials, you directory from which data should be extracted. Additionally, you may define file patterns to include or exclude specific files within the selected directory. -#### Important: +#### Important + To establish a secure connection, you must generate a dedicated [app-specific password and username](https://docs.nextcloud.com/server/latest/user_manual/de/session_management.html) in the Security section of your Nextcloud account settings. Do not use your standard login credentials. - ## Parameter @@ -39,8 +38,6 @@ The Base URL of your Nextcloud service, e.g. `https://cloud.example.com`. - Datatype: `string` - Default Value: `None` - - ### API identification The identification generated by the app-password function in Nextcloud. @@ -49,8 +46,6 @@ The identification generated by the app-password function in Nextcloud. - Datatype: `string` - Default Value: `None` - - ### API token The token generated by the app-password function in Nextcloud. @@ -59,8 +54,6 @@ The token generated by the app-password function in Nextcloud. - Datatype: `password` - Default Value: `None` - - ### File or directory path The path of a specific file or directory that needs to be transformed. 
Includes all the subdirectories. Leave empty or type '/' for root directory.When pasting a directory path, select 'Custom entry:' to trigger autocompletion andcheck if the folder is recognized. @@ -69,8 +62,6 @@ The path of a specific file or directory that needs to be transformed. Includes - Datatype: `string` - Default Value: `None` - - ### File expression using * A filepath for searching specified files through the given path.Leave blank for all file types.E.g. searching for *.txt results in all .txt files under the given directory and its subdirectories. @@ -79,8 +70,6 @@ A filepath for searching specified files through the given path.Leave blank for - Datatype: `string` - Default Value: `None` - - ### Error on empty result A flag indicating weather an empty output will throw an error. @@ -89,9 +78,7 @@ A flag indicating weather an empty output will throw an error. - Datatype: `boolean` - Default Value: `false` - - -### Exclude files in subfolders. +### Exclude files in subfolders A flag indicating whether files located in subfolders should be excluded from the workflow. @@ -99,10 +86,6 @@ A flag indicating whether files located in subfolders should be excluded from th - Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/cmem_plugin_nextcloud-Upload.md b/docs/build/reference/customtask/cmem_plugin_nextcloud-Upload.md index e4c1d3b66..bc5259d28 100644 --- a/docs/build/reference/customtask/cmem_plugin_nextcloud-Upload.md +++ b/docs/build/reference/customtask/cmem_plugin_nextcloud-Upload.md @@ -15,21 +15,21 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - This workflow task uploads files to a specified Nextcloud service instance. Given the URL of the target Nextcloud instance along with your credentials, you can specify any directory to which files should be uploaded. 
### Input + If this workflow has an input, it will take the data that comes in instead of the selected source file. -#### Important: +#### Important + To establish a secure connection, you must generate a dedicated [app-specific password and username](https://docs.nextcloud.com/server/latest/user_manual/de/session_management.html) in the Security section of your Nextcloud account settings. Do not use your standard login credentials. - ## Parameter @@ -41,8 +41,6 @@ The Base URL of your Nextcloud service, e.g. `https://cloud.example.com`. - Datatype: `string` - Default Value: `None` - - ### API identification The identification generated by the app-password function in Nextcloud. @@ -51,8 +49,6 @@ The identification generated by the app-password function in Nextcloud. - Datatype: `string` - Default Value: `None` - - ### API token The token generated by the app-password function in Nextcloud. @@ -61,8 +57,6 @@ The token generated by the app-password function in Nextcloud. - Datatype: `password` - Default Value: `None` - - ### File or directory path The path to a specific directory to which to upload files.Leave empty or type '/' for root directory.When pasting a directory path, select 'Custom entry:' to trigger autocompletion andcheck if the folder is recognized. @@ -71,10 +65,6 @@ The path to a specific directory to which to upload files.Leave empty or type '/ - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter ### File @@ -84,6 +74,3 @@ The file which you would like to upload. 
If this is set, no connections in the w - ID: `source_file` - Datatype: `string` - Default Value: `None` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_office365-Download.md b/docs/build/reference/customtask/cmem_plugin_office365-Download.md index 924499729..4cbdadfe8 100644 --- a/docs/build/reference/customtask/cmem_plugin_office365-Download.md +++ b/docs/build/reference/customtask/cmem_plugin_office365-Download.md @@ -15,12 +15,12 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - This workflow task downloads files from a specified Office 365 instance. For this to work a registered app in Microsoft's Entra ID space is necessary. Further information can be found [here](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-register-app). After registering an application, it needs to be granted application wide API permissions: + - Files.Read.All - Sites.Read.All @@ -29,10 +29,10 @@ With this setup, anyone with the secret can access all users' OneDrives and all sites. #### Important + Make sure only trusted admins can create or manage secrets! Whoever holds the secrets has all the access to granted resources so best not to distribute recklessly. - ## Parameter @@ -44,8 +44,6 @@ ID of your tenant. Can be seen within your registered application - Datatype: `string` - Default Value: `None` - - ### Client ID Client ID of your registered application. @@ -54,8 +52,6 @@ Client ID of your registered application. - Datatype: `string` - Default Value: `None` - - ### Client secret Client secret created withing your registered application. @@ -64,8 +60,6 @@ Client secret created withing your registered application. - Datatype: `password` - Default Value: `None` - - ### Type resource The type of resource you want the data to be extracted from. This can either be a site or a users share @@ -74,8 +68,6 @@ The type of resource you want the data to be extracted from. 
This can either be - Datatype: `string` - Default Value: `None` - - ### Target resource Target resource which files will be listed from. This can either be a specific users share address or a microsoft site URL. @@ -84,8 +76,6 @@ Target resource which files will be listed from. This can either be a specific u - Datatype: `string` - Default Value: `None` - - ### Drives A list of drives from the selected target resource. @@ -94,8 +84,6 @@ A list of drives from the selected target resource. - Datatype: `string` - Default Value: `None` - - ### Directory path The path of a directory that needs to be transformed. Includes all subdirectories by default @@ -104,8 +92,6 @@ The path of a directory that needs to be transformed. Includes all subdirectorie - Datatype: `string` - Default Value: `None` - - ### Regular expression A regular expression performed on all the files within the selected path @@ -114,8 +100,6 @@ A regular expression performed on all the files within the selected path - Datatype: `string` - Default Value: `^.*$` - - ### Exclude files in subfolders A flag indicating if files should only be listed from subfolders or not. @@ -124,10 +108,6 @@ A flag indicating if files should only be listed from subfolders or not. - Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter ### Maximum amount of workers @@ -137,6 +117,3 @@ Specifies the maximum number of threads used for parallel execution of the workf - ID: `max_workers` - Datatype: `Long` - Default Value: `32` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_office365-List.md b/docs/build/reference/customtask/cmem_plugin_office365-List.md index e661c423e..529d3c0d4 100644 --- a/docs/build/reference/customtask/cmem_plugin_office365-List.md +++ b/docs/build/reference/customtask/cmem_plugin_office365-List.md @@ -15,12 +15,12 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - This workflow task creates a structured output from a specified Office 365 instance. 
For this to work a registered app in Microsoft's Entra ID space is necessary. Further information can be found [here](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-register-app). After registering an application, it needs to be granted application wide API permissions: + - Files.Read.All - Sites.Read.All @@ -29,10 +29,10 @@ With this setup, anyone with the secret can access all users' OneDrives and all sites. #### Important + Make sure only trusted admins can create or manage secrets! Whoever holds the secrets has all the access to granted resources so best not to distribute recklessly. - ## Parameter @@ -44,8 +44,6 @@ ID of your tenant. Can be seen within your registered application - Datatype: `string` - Default Value: `None` - - ### Client ID Client ID of your registered application. @@ -54,8 +52,6 @@ Client ID of your registered application. - Datatype: `string` - Default Value: `None` - - ### Client secret Client secret created withing your registered application. @@ -64,8 +60,6 @@ Client secret created withing your registered application. - Datatype: `password` - Default Value: `None` - - ### Type resource The type of resource you want the data to be extracted from. This can either be a site or a users share @@ -74,8 +68,6 @@ The type of resource you want the data to be extracted from. This can either be - Datatype: `string` - Default Value: `None` - - ### Target resource Target resource which files will be listed from. This can either be a specific users share address or a microsoft site URL. @@ -84,8 +76,6 @@ Target resource which files will be listed from. This can either be a specific u - Datatype: `string` - Default Value: `None` - - ### Drives A list of drives from the selected target resource. @@ -94,8 +84,6 @@ A list of drives from the selected target resource. - Datatype: `string` - Default Value: `None` - - ### Directory path The path of a directory that needs to be transformed. 
Includes all subdirectories by default @@ -104,8 +92,6 @@ The path of a directory that needs to be transformed. Includes all subdirectorie - Datatype: `string` - Default Value: `None` - - ### Regular expression A regular expression performed on all the files within the selected path @@ -114,8 +100,6 @@ A regular expression performed on all the files within the selected path - Datatype: `string` - Default Value: `^.*$` - - ### Exclude files in subfolders A flag indicating if files should only be listed from subfolders or not. @@ -124,10 +108,6 @@ A flag indicating if files should only be listed from subfolders or not. - Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter ### Maximum amount of workers @@ -137,6 +117,3 @@ Specifies the maximum number of threads used for parallel execution of the workf - ID: `max_workers` - Datatype: `Long` - Default Value: `32` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_office365-Upload.md b/docs/build/reference/customtask/cmem_plugin_office365-Upload.md index 00467fed7..e4216ee6a 100644 --- a/docs/build/reference/customtask/cmem_plugin_office365-Upload.md +++ b/docs/build/reference/customtask/cmem_plugin_office365-Upload.md @@ -15,12 +15,12 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - This workflow task upload files to specified Office 365 instance. For this to work a registered app in Microsoft's Entra ID space is necessary. Further information can be found [here](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-register-app). After registering an application, it needs to be granted application wide API permissions: + - Files.Read.All, Files.Write.All - Sites.Read.All, Sites.Write.All @@ -29,10 +29,10 @@ With this setup, anyone with the secret can access all users' OneDrives and all sites. #### Important + Make sure only trusted admins can create or manage secrets! 
Whoever holds the secrets has all the access to granted resources so best not to distribute recklessly. - ## Parameter @@ -44,8 +44,6 @@ ID of your tenant. Can be seen within your registered application - Datatype: `string` - Default Value: `None` - - ### Client ID Client ID of your registered application. @@ -54,8 +52,6 @@ Client ID of your registered application. - Datatype: `string` - Default Value: `None` - - ### Client secret Client secret created withing your registered application. @@ -64,8 +60,6 @@ Client secret created withing your registered application. - Datatype: `password` - Default Value: `None` - - ### Type resource The type of resource you want the data to be extracted from. This can either be a site or a users share @@ -74,8 +68,6 @@ The type of resource you want the data to be extracted from. This can either be - Datatype: `string` - Default Value: `None` - - ### Target resource Target resource which files will be listed from. This can either be a specific users share address or a microsoft site URL. @@ -84,8 +76,6 @@ Target resource which files will be listed from. This can either be a specific u - Datatype: `string` - Default Value: `None` - - ### Drives A list of drives from the selected target resource. @@ -94,8 +84,6 @@ A list of drives from the selected target resource. - Datatype: `string` - Default Value: `None` - - ### Directory path The path of a directory that needs to be transformed. Includes all subdirectories by default @@ -104,10 +92,6 @@ The path of a directory that needs to be transformed. 
Includes all subdirectorie - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter ### Maximum amount of workers @@ -117,6 +101,3 @@ Specifies the maximum number of threads used for parallel execution of the workf - ID: `max_workers` - Datatype: `Long` - Default Value: `32` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_parameters-ParametersPlugin.md b/docs/build/reference/customtask/cmem_plugin_parameters-ParametersPlugin.md index 93bb07781..e8892a8ed 100644 --- a/docs/build/reference/customtask/cmem_plugin_parameters-ParametersPlugin.md +++ b/docs/build/reference/customtask/cmem_plugin_parameters-ParametersPlugin.md @@ -40,16 +40,16 @@ limit: 5 ``` - ## Parameter ### Parameter Configuration -Your parameter configuration in YAML Syntax. One 'parameter: value' pair per line. url: http://example.org method: GET query: | SELECT ?s WHERE {{ ?s ?p ?o }} execute_once: True limit: 5 +Your parameter configuration in YAML Syntax. One 'parameter: value' pair per line. url: method: GET query: | SELECT ?s WHERE {{ ?s ?p ?o }} execute_once: True limit: 5 - ID: `parameters` - Datatype: `code-yaml` - Default Value: + ``` yaml url: http://example.org method: GET @@ -63,10 +63,6 @@ limit: 5 ``` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/cmem_plugin_pdf_extract-pdf_extract-PdfExtract.md b/docs/build/reference/customtask/cmem_plugin_pdf_extract-pdf_extract-PdfExtract.md index eb44cadd1..a5fd3405d 100644 --- a/docs/build/reference/customtask/cmem_plugin_pdf_extract-pdf_extract-PdfExtract.md +++ b/docs/build/reference/customtask/cmem_plugin_pdf_extract-pdf_extract-PdfExtract.md @@ -22,7 +22,6 @@ A task to extract text and tables from PDF files. The output is a JSON string on the path `pdf_extract_output`. The format depends on the ["Combine the results from all files into a single value"](#parameter_doc_all_files) parameter. 
- ### Output one entity/value per file ``` @@ -49,7 +48,6 @@ The output is a JSON string on the path `pdf_extract_output`. The format depends } ``` - ### Output one entity/value for all files ``` @@ -69,11 +67,10 @@ The output is a JSON string on the path `pdf_extract_output`. The format depends ## Input format This task can either work with project files when a regular expression is being used or with -entities coming from another task or dataset. +entities coming from another task or dataset. The input must be file entities following the [FileEntitySchema](https://github.com/eccenca/cmem-plugin-base/blob/main/cmem_plugin_base/dataintegration/typed_entities/file.py). If a regular expression is set, the input ports will close and no connection will be possible. - ## Parameters **File name regex filter** @@ -92,6 +89,7 @@ If set to "Combine", the results of all files will be combined into a single out **Error Handling Mode** Specifies how errors during PDF extraction should be handled. + - *Ignore*: Log errors and continue processing, returning empty or error-marked results. - *Raise on errors*: Raise an error when extraction fails. - *Raise on errors and warnings*: Treat any warning from the underlying PDF extraction module (pdfplumber) when extracting text and tables from pages as an error if empty results are returned. @@ -101,6 +99,7 @@ Specifies how errors during PDF extraction should be handled. Method used to detect tables in PDF pages. For further explanation click [here](https://github.com/jsvine/pdfplumber/blob/stable/README.md#extracting-tables). Available strategies include: + - *lines*: Uses detected lines in the PDF layout to find table boundaries. - *text*: Relies on text alignment and spacing. - *lattice*: Best for machine-generated perfect grids. @@ -113,9 +112,10 @@ Defines a custom table extraction strategy using YAML syntax. Only used if "cust **Text extraction strategy** -Method used to extract text in PDF pages. 
For further explanation click [here](https://github.com/jsvine/pdfplumber/blob/stable/README.md#extracting-text). +Method used to extract text in PDF pages. For further explanation click [here](https://github.com/jsvine/pdfplumber/blob/stable/README.md#extracting-text). Available strategies include: + - *default*: Balanced for most digital PDFs. - *raw*: Extract the PDFs with no merging of text fragments. - *scanned*: Best for scanned PDFs as it merges text more agressively. @@ -125,7 +125,6 @@ Available strategies include: Defines the maximum number of processes to use for concurrent file processing. By default, this is set to (number of virtual cores - 1). - ## Test regular expression Clicking the "Test regex pattern" button displays the files in the current project that match the regular expression @@ -133,7 +132,6 @@ specified with the ["File name regex filter"](#parameter_doc_regex) parameter. This does not display the files if there is another dataset or task connected to the input as the entities are not known before execution. - ## Parameter ### Combine the results from all files into a single value @@ -144,8 +142,6 @@ If set to 'Combine', the results of all files will be combined into a single out - Datatype: `string` - Default Value: `no_combine` - - ### Page selection Comma-separated page numbers or ranges (e.g., 1,2-5,7) for page selection. Files that do not contain any of the specified pages will return empty results with the information logged. If no page selection is specified, all pages will be processed. @@ -154,8 +150,6 @@ Comma-separated page numbers or ranges (e.g., 1,2-5,7) for page selection. Files - Datatype: `string` - Default Value: `None` - - ### Error Handling Mode The mode in which errors during the extraction are handled. If set to "Ignore", it will log errors and continue, returning empty or error-marked results for files. 
When "Raise on errors and warnings" is selected, any warning from the underlying PDF extraction module when extracting text and tables from pages is treated as an error if empty results are returned. @@ -164,8 +158,6 @@ The mode in which errors during the extraction are handled. If set to "Ignore", - Datatype: `string` - Default Value: `raise_on_error` - - ### Table extraction strategy Specifies the method used to detect tables in the PDF page. Options include "lines" and "text", each using different cues (such as lines or text alignment) to find tables. If "Custom" is selected, a custom setting needs to defined under advanced options. @@ -174,8 +166,6 @@ Specifies the method used to detect tables in the PDF page. Options include "lin - Datatype: `string` - Default Value: `lines` - - ### Text extraction strategy Specifies how text is extracted from a PDF page. Options include "raw", "layout", and others, each interpreting character positions and formatting differently to control how text is grouped and ordered. @@ -184,10 +174,6 @@ Specifies how text is extracted from a PDF page. Options include "raw", "layout" - Datatype: `string` - Default Value: `default` - - - - ## Advanced Parameter ### File name regex filter @@ -198,8 +184,6 @@ Regular expression for filtering resources of the project. If this parameter is - Datatype: `string` - Default Value: `None` - - ### Custom table extraction strategy Custom table extraction strategy in YAML format. @@ -207,6 +191,7 @@ Custom table extraction strategy in YAML format. - ID: `custom_table_strategy` - Datatype: `multiline string` - Default Value: + ``` text # edge_min_length: 3 # explicit_horizontal_lines: [] @@ -234,8 +219,6 @@ Custom table extraction strategy in YAML format. # vertical_strategy: lines ``` - - ### Custom_text_strategy Custom text extraction strategy in YAML format. @@ -243,6 +226,7 @@ Custom text extraction strategy in YAML format. 
- ID: `custom_text_strategy` - Datatype: `multiline string` - Default Value: + ``` text # extra_attrs: [] # horizontal_ltr: true @@ -257,8 +241,6 @@ Custom text extraction strategy in YAML format. # y_tolerance: 1 ``` - - ### Maximum number of processes for processing files The maximum number of processes to use for processing multiple files concurrently. The default is (number of virtual cores)-1. @@ -266,6 +248,3 @@ The maximum number of processes to use for processing multiple files concurrentl - ID: `max_processes` - Datatype: `Long` - Default Value: `9` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_pgvector-Search.md b/docs/build/reference/customtask/cmem_plugin_pgvector-Search.md index 2091e5f74..5221588da 100644 --- a/docs/build/reference/customtask/cmem_plugin_pgvector-Search.md +++ b/docs/build/reference/customtask/cmem_plugin_pgvector-Search.md @@ -15,7 +15,6 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - This workflow task search for the top-k metadata stored into Postgres Vector Store. The incoming embedding entities are used to retrieve the nearest top-k @@ -42,7 +41,6 @@ The results in this output are structured like this: ] ``` - ## Parameter ### Database Host @@ -53,8 +51,6 @@ The hostname of the postgres database service. - Datatype: `string` - Default Value: `pgvector` - - ### Database Port The port number of the postgres database service. @@ -63,8 +59,6 @@ The port number of the postgres database service. - Datatype: `Long` - Default Value: `5432` - - ### Database User The account name used to login to the postgres database service. @@ -73,8 +67,6 @@ The account name used to login to the postgres database service. - Datatype: `string` - Default Value: `pgvector` - - ### Database Password The password of the database account. @@ -83,8 +75,6 @@ The password of the database account. - Datatype: `password` - Default Value: `None` - - ### Database Name The database name. @@ -93,8 +83,6 @@ The database name. 
- Datatype: `string` - Default Value: `pgvector` - - ### Collection Name The name of the collection that will be used for search. @@ -103,8 +91,6 @@ The name of the collection that will be used for search. - Datatype: `string` - Default Value: `None` - - ### Search Result Path The path containing the search result in the output entities. @@ -113,8 +99,6 @@ The path containing the search result in the output entities. - Datatype: `string` - Default Value: `_search_result` - - ### Embedding Query Path The path containing the embedding to be used for searching. @@ -123,8 +107,6 @@ The path containing the embedding to be used for searching. - Datatype: `string` - Default Value: `_embedding` - - ### Top-k The number of entries to be returned in the search result. @@ -133,10 +115,6 @@ The number of entries to be returned in the search result. - Datatype: `Long` - Default Value: `10` - - - - ## Advanced Parameter ### Distance Strategy @@ -146,6 +124,3 @@ The distance strategy to use. (default: COSINE) - ID: `distance_strategy` - Datatype: `enumeration` - Default Value: `COSINE` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_pgvector-Store.md b/docs/build/reference/customtask/cmem_plugin_pgvector-Store.md index db87b4a0c..f870237c9 100644 --- a/docs/build/reference/customtask/cmem_plugin_pgvector-Store.md +++ b/docs/build/reference/customtask/cmem_plugin_pgvector-Store.md @@ -15,7 +15,6 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - This plugin workflow store embeddings into Postgres Vector Store. The vector embeddings and its respective metadata are going to be stored into a collection inside @@ -23,7 +22,6 @@ the Postgres Vector Store. It is possible to specify either the name of the attributes containing the vectors as well as the metadata. - ## Parameter ### Database Host @@ -34,8 +32,6 @@ The hostname of the postgres database service. 
- Datatype: `string` - Default Value: `pgvector` - - ### Database Port The port number of the postgres database service. @@ -44,8 +40,6 @@ The port number of the postgres database service. - Datatype: `Long` - Default Value: `5432` - - ### Database User The account name used to login to the postgres database service. @@ -54,8 +48,6 @@ The account name used to login to the postgres database service. - Datatype: `string` - Default Value: `pgvector` - - ### Database Password The password of the database account. @@ -64,8 +56,6 @@ The password of the database account. - Datatype: `password` - Default Value: `None` - - ### Database Name The database name. @@ -74,8 +64,6 @@ The database name. - Datatype: `string` - Default Value: `pgvector` - - ### Collection Name The name of the collection that will be used for search. @@ -84,8 +72,6 @@ The name of the collection that will be used for search. - Datatype: `string` - Default Value: `None` - - ### Pre Delete Collection If set to true, then the collection will removed at the beginning. @@ -94,10 +80,6 @@ If set to true, then the collection will removed at the beginning. - Datatype: `boolean` - Default Value: `true` - - - - ## Advanced Parameter ### Source Path @@ -108,8 +90,6 @@ The name of the path to use for reading the embedding source. - Datatype: `string` - Default Value: `_embedding_source` - - ### Embedding Path The name of the path to use for reading the embeddings. @@ -118,8 +98,6 @@ The name of the path to use for reading the embeddings. - Datatype: `string` - Default Value: `_embedding` - - ### Metadata Paths The comma separated list path names to be used as metadata. Empty name means all paths (except embedding source and embedding) will be used @@ -128,8 +106,6 @@ The comma separated list path names to be used as metadata. Empty name means all - Datatype: `string` - Default Value: `None` - - ### Batch Processing Size The number of entries to be processed in batch. 
@@ -137,6 +113,3 @@ The number of entries to be processed in batch. - ID: `batch_processing_size` - Datatype: `Long` - Default Value: `100` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_project_resources-List.md b/docs/build/reference/customtask/cmem_plugin_project_resources-List.md index eb727dd7a..dbf0214cd 100644 --- a/docs/build/reference/customtask/cmem_plugin_project_resources-List.md +++ b/docs/build/reference/customtask/cmem_plugin_project_resources-List.md @@ -53,7 +53,6 @@ We recommend to test your regular expression before using it. [This deep-link](https://regex101.com/?testString=dataset.csv%0Amy-dataset.xml%0Ajson/example.json%0Ajson/example_new.json%0Ajson/data.xml®ex=.*new.*) provides a test bed using the example files and the last expression from the list. - ## Parameter ### File matching regex @@ -64,10 +63,6 @@ The regex for filtering the file names. The regex needs to match the full path ( - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/cmem_plugin_project_resources-UploadLocalFiles.md b/docs/build/reference/customtask/cmem_plugin_project_resources-UploadLocalFiles.md index da87e74a2..8e4efe9ba 100644 --- a/docs/build/reference/customtask/cmem_plugin_project_resources-UploadLocalFiles.md +++ b/docs/build/reference/customtask/cmem_plugin_project_resources-UploadLocalFiles.md @@ -15,7 +15,6 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - This plugin allows you to upload multiple local files to the next workflow task. Be aware that only file based datasets can handle file entities (e.g. JSON, CSV). @@ -24,7 +23,6 @@ As an advanced option, you can change the working mode to UPLOAD_TO_PROJECT, whi allows for blindly adding files to the project space (with a consuming workflow task). Make sure to use always use the preview function to avoid overloading you project. 
- ## Parameter ### Directory @@ -35,8 +33,6 @@ The local directory where the files are located. - Datatype: `string` - Default Value: `None` - - ### File matching regex The regex for filtering the file names. The regex needs to fully match the local name without directory. @@ -45,10 +41,6 @@ The regex for filtering the file names. The regex needs to fully match the local - Datatype: `string` - Default Value: `.*` - - - - ## Advanced Parameter ### Working mode @@ -58,6 +50,3 @@ Which activity should be done with the selected local files. - ID: `working_mode` - Datatype: `enumeration` - Default Value: `SEND_TO_TASK` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_salesforce-SoqlQuery.md b/docs/build/reference/customtask/cmem_plugin_salesforce-SoqlQuery.md index f13deab9f..172227cd5 100644 --- a/docs/build/reference/customtask/cmem_plugin_salesforce-SoqlQuery.md +++ b/docs/build/reference/customtask/cmem_plugin_salesforce-SoqlQuery.md @@ -15,7 +15,6 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - This task executes a custom Salesforce Object Query (SOQL) and returns sets of tabular data from your organization's Salesforce account. @@ -35,10 +34,13 @@ SOQL Query. By default, this Parse SOQL option is set `True` (enabled). Examples: Retrieve all standard fields from all Lead resources. (without parser validation) + ``` SELECT FIELDS(STANDARD) FROM Lead ``` + Retrieve first name and last name of all Contact resources. (with parser validation) + ``` SELECT Contact.Firstname, Contact.Lastname FROM Contact ``` @@ -46,7 +48,6 @@ SELECT Contact.Firstname, Contact.Lastname FROM Contact Please refer to the [Salesforce Standard Objects list](https://developer.salesforce.com/docs/atlas.en-us.238.0.object_reference.meta/object_reference/sforce_api_objects_list.htm) of the Salesforce Platform data model in order to get an overview of the available objects and fields. 
- ## Parameter ### Username @@ -57,18 +58,12 @@ Username of the Salesforce Account. This is typically your email address. - Datatype: `string` - Default Value: `None` - - ### Password - - - ID: `password` - Datatype: `string` - Default Value: `None` - - ### Security Token In addition to your standard account credentials, you need to provide a security token to access your data. Refer to the [Salesforce Reset Token Documentation](https://help.salesforce.com/s/articleView?id=sf.user_security_token.htm&type=5) to learn how to retrieve or reset your token. @@ -77,8 +72,6 @@ In addition to your standard account credentials, you need to provide a security - Datatype: `string` - Default Value: `None` - - ### SOQL Query The query text of your SOQL query. SOQL uses the SELECT statement combined with filtering statements to return sets of data, which can optionally be ordered. For a complete description of the syntax, see [Salesforce SOQL SELECT Syntax](https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select.htm). @@ -87,10 +80,6 @@ The query text of your SOQL query. SOQL uses the SELECT statement combined with - Datatype: `multiline string` - Default Value: `None` - - - - ## Advanced Parameter ### Dataset @@ -100,6 +89,3 @@ In addition to have direct output of the fetched entities of your SOQL query, yo - ID: `dataset` - Datatype: `string` - Default Value: `None` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_salesforce-workflow-operations-SobjectCreate.md b/docs/build/reference/customtask/cmem_plugin_salesforce-workflow-operations-SobjectCreate.md index 39e57e857..cced08940 100644 --- a/docs/build/reference/customtask/cmem_plugin_salesforce-workflow-operations-SobjectCreate.md +++ b/docs/build/reference/customtask/cmem_plugin_salesforce-workflow-operations-SobjectCreate.md @@ -15,35 +15,37 @@ tags: In order to use it, you need to install it, e.g. with cmemc. 
- This task retrieves data from an incoming workflow task (such as a SPARQL query), and sends bulk API requests to the Salesforce Object API, in order to manipulate data in your organization's Salesforce account. The working model is: + - Each entity from the input data is interpreted as a single Salesforce object of the configured object type. - Each path from the input entity is interpreted as a field from the Salesforce data model (refer to the [Salesforce Standard Objects list](https://developer.salesforce.com/docs/atlas.en-us.238.0.object_reference.meta/object_reference/sforce_api_objects_list.htm)). - The special path `id` is used to identify an object in Salesforce and switch between update/creation mode, means: - - If there is NO id path available, a new object is created. - - If there IS an id path available, an update is done if the object exists. + - If there is NO id path available, a new object is created. + - If there IS an id path available, an update is done if the object exists. Example: + - You want to create new Lead objects based on data from a Knowledge Graph. - The [Lead Object Reference](https://developer.salesforce.com/docs/atlas.en-us.238.0.object_reference.meta/object_reference/sforce_api_objects_lead.htm) lists the supported fields, e.g. `FirstName`, `LastName` and `Email`. - Your input SPARQL task looks like this. Note that the variables need to match the field strings from the Salesforce data model: + ``` SELECT DISTINCT FirstName, LastName, Email ... ``` + - You select `Lead` as the Object API Name of this task and you connect both task in the workflow in order get the result of the SPARQL task as in input for this task. - For each SPARQL result, a new Lead is created. - ## Parameter ### Username @@ -54,18 +56,12 @@ Username of the Salesforce Account. This is typically your email address. 
- Datatype: `string` - Default Value: `None` - - ### Password - - - ID: `password` - Datatype: `string` - Default Value: `None` - - ### Security Token In addition to your standard account credentials, you need to provide a security token to access your data. Refer to the [Salesforce Reset Token Documentation](https://help.salesforce.com/s/articleView?id=sf.user_security_token.htm&type=5) to learn how to retrieve or reset your token. @@ -74,8 +70,6 @@ In addition to your standard account credentials, you need to provide a security - Datatype: `string` - Default Value: `None` - - ### Object API Name Salesforce Object API Name @@ -84,10 +78,6 @@ Salesforce Object API Name - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/cmem_plugin_shapes-plugin_shapes-ShapesPlugin.md b/docs/build/reference/customtask/cmem_plugin_shapes-plugin_shapes-ShapesPlugin.md index 061ee64bf..94cbab250 100644 --- a/docs/build/reference/customtask/cmem_plugin_shapes-plugin_shapes-ShapesPlugin.md +++ b/docs/build/reference/customtask/cmem_plugin_shapes-plugin_shapes-ShapesPlugin.md @@ -16,7 +16,7 @@ tags: e.g. with cmemc. A task to generate SHACL node and property shapes from an instance data knowledge graph. - + ## Parameters **Input data graph** @@ -45,15 +45,16 @@ If the graph is not imported, the new shapes are not activated and used. **Fetch namespace prefixes from prefix.cc** -Fetch the list of namespace prefixes from https://prefix.cc instead of using the local prefix database. If unavailable, -fall back to the local database. Prefixes defined in the Corporate Memory project override database prefixes. Enabling this +Fetch the list of namespace prefixes from instead of using the local prefix database. If unavailable, +fall back to the local database. Prefixes defined in the Corporate Memory project override database prefixes. 
Enabling this option exposes your IP address to prefix.cc but no other data is shared. If unsure, keep this option disabled. See -https://prefix.cc/about. +. **Properties to ignore** Provide the list of properties (as IRIs) for which you do not want to create property shapes. Example: + ``` http://www.w3.org/1999/02/22-rdf-syntax-ns#type http://xmlns.com/foaf/0.1/familyName @@ -63,7 +64,6 @@ http://xmlns.com/foaf/0.1/familyName Add information about the plugin and plugin settings to the shapes graph. - ## Parameter ### Input data graph @@ -74,8 +74,6 @@ The knowledge graph containing the instance data to be analyzed for the SHACL sh - Datatype: `string` - Default Value: `None` - - ### Output shape catalog The knowledge graph the generated shapes will be added to. @@ -84,8 +82,6 @@ The knowledge graph the generated shapes will be added to. - Datatype: `string` - Default Value: `None` - - ### Output shape catalog label The label for the shape catalog graph. If no label is specified for a new shapes graph, a label will be generated. If no label is specified when adding to a shapes graph, the original label will be kept, or, if the existing graph does not have a label, a label will be generated. Only labels with language tag "en" or without language tag are considered. @@ -94,8 +90,6 @@ The label for the shape catalog graph. If no label is specified for a new shapes - Datatype: `string` - Default Value: `None` - - ### Handle existing output graph Add result to the existing graph (add result to graph), overwrite the existing graph with the result (replace existing graph with result), or stop the workflow if the output graph already exists (stop workflow if output graph exists). 
@@ -104,8 +98,6 @@ Add result to the existing graph (add result to graph), overwrite the existing g - Datatype: `string` - Default Value: `stop` - - ### Import the output graph into the central shapes catalog Import the SHACL shapes graph in the CMEM shapes catalog by adding an `owl:imports` statement to the central CMEM shapes catalog. If the graph is not imported, the new shapes are not activated and used. @@ -114,22 +106,16 @@ Import the SHACL shapes graph in the CMEM shapes catalog by adding an `owl:impor - Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter ### Fetch namespace prefixes from prefix.cc -Fetch the list of namespace prefixes from https://prefix.cc instead of using the local prefix database. If unavailable, fall back to the local database. Prefixes defined in the Corporate Memory project override database prefixes. Enabling this option exposes your IP address to prefix.cc but no other data is shared. If unsure, keep this option disabled. See https://prefix.cc/about. +Fetch the list of namespace prefixes from instead of using the local prefix database. If unavailable, fall back to the local database. Prefixes defined in the Corporate Memory project override database prefixes. Enabling this option exposes your IP address to prefix.cc but no other data is shared. If unsure, keep this option disabled. See . - ID: `prefix_cc` - Datatype: `boolean` - Default Value: `false` - - ### Properties to ignore Provide the list of properties (as IRIs) to ignore. @@ -138,8 +124,6 @@ Provide the list of properties (as IRIs) to ignore. - Datatype: `multiline string` - Default Value: `http://www.w3.org/1999/02/22-rdf-syntax-ns#type` - - ### Include plugin provenance Add information about the plugin and plugin settings to the shapes graph. @@ -147,6 +131,3 @@ Add information about the plugin and plugin settings to the shapes graph. 
- ID: `plugin_provenance` - Datatype: `boolean` - Default Value: `false` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_splitfile-plugin_splitfile-SplitFilePlugin.md b/docs/build/reference/customtask/cmem_plugin_splitfile-plugin_splitfile-SplitFilePlugin.md index 90ba8e37f..54affc1fb 100644 --- a/docs/build/reference/customtask/cmem_plugin_splitfile-plugin_splitfile-SplitFilePlugin.md +++ b/docs/build/reference/customtask/cmem_plugin_splitfile-plugin_splitfile-SplitFilePlugin.md @@ -52,7 +52,6 @@ If enabled, the "Internal projects directory" parameter has to be set. The path to the internal projects directory. If "Use internal projects directory" is disabled, this parameter has no effect. - ## Parameter ### Input filename @@ -63,8 +62,6 @@ The input file to be split. - Datatype: `string` - Default Value: `None` - - ### Chunk size The maximum size of the chunk files. @@ -73,8 +70,6 @@ The maximum size of the chunk files. - Datatype: `double` - Default Value: `None` - - ### Size unit The unit of the size value: kilobyte (KB), megabyte (MB), gigabyte (GB), or number of lines (Lines). @@ -83,8 +78,6 @@ The unit of the size value: kilobyte (KB), megabyte (MB), gigabyte (GB), or numb - Datatype: `string` - Default Value: `MB` - - ### Include header Include the header in each split. The first line of the input file is treated as the header. @@ -93,8 +86,6 @@ Include the header in each split. The first line of the input file is treated as - Datatype: `boolean` - Default Value: `false` - - ### Delete input file Delete the input file after splitting. @@ -103,10 +94,6 @@ Delete the input file after splitting. - Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter ### Use internal projects directory @@ -117,8 +104,6 @@ Use the internal projects directory of DataIntegration to fetch and store files, - Datatype: `boolean` - Default Value: `false` - - ### Internal projects directory The path to the internal projects directory. 
If "Use internal projects directory" is disabled, this parameter has no effect. @@ -126,6 +111,3 @@ The path to the internal projects directory. If "Use internal projects directory - ID: `projects_path` - Datatype: `string` - Default Value: `/data/datalake` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_ssh-Download.md b/docs/build/reference/customtask/cmem_plugin_ssh-Download.md index 90be34b62..bddfb8845 100644 --- a/docs/build/reference/customtask/cmem_plugin_ssh-Download.md +++ b/docs/build/reference/customtask/cmem_plugin_ssh-Download.md @@ -15,7 +15,6 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - This workflow task downloads files from a specified SSH instance. By providing the hostname, username, port and authentication method, you can specify the @@ -25,25 +24,27 @@ You can also define a regular expression to include or exclude specific files. There is also an option to prevent files in subfolders from being included. -#### Authentication Methods: +#### Authentication Methods + * **Password:** Only the password will be used for authentication. The private key field is ignored, even if filled. * **Key:** The private key will be used for authentication. If the key is encrypted, the password will be used to decrypt it. -#### Error handling modes: +#### Error handling modes + * **Ignore:** Ignores the permission rights of files and lists downloads all files it has access to. Skips folders and files when there is no correct permission. * **Warning:** Warns the user about files that the user has no permission rights to. Downloads all other files and skips files folder when there is no correct permission. * **Error:** Throws an error when there is a single file or folder with incorrect permission rights. -#### Note: +#### Note + * If a connection cannot be established within 20 seconds, a timeout occurs. * Currently supported key types are: RSA, DSS, ECDSA, Ed25519. 
* Setting the maximum amount of workers to more than 1 may cause a Channel Exception when the amount of files is too large - ## Parameter @@ -51,113 +52,88 @@ the amount of files is too large Hostname to connect to. Usually in the form of an IP address -- ID: `hostname` -- Datatype: `string` -- Default Value: `None` - - +* ID: `hostname` +* Datatype: `string` +* Default Value: `None` ### Port The port on which the connection will be tried on. Default is 22. -- ID: `port` -- Datatype: `Long` -- Default Value: `22` - - +* ID: `port` +* Datatype: `Long` +* Default Value: `22` ### Username The username with which a connection will be instantiated. -- ID: `username` -- Datatype: `string` -- Default Value: `None` - - +* ID: `username` +* Datatype: `string` +* Default Value: `None` ### Authentication method The method that is used to connect to the SSH server. -- ID: `authentication_method` -- Datatype: `string` -- Default Value: `password` - - +* ID: `authentication_method` +* Datatype: `string` +* Default Value: `password` ### Private key Your private key to connect via SSH. -- ID: `private_key` -- Datatype: `password` -- Default Value: `None` - - +* ID: `private_key` +* Datatype: `password` +* Default Value: `None` ### Password Depending on your authentication method this will either be used toconnect via password to SSH, or to decrypt the SSH private key -- ID: `password` -- Datatype: `password` -- Default Value: `None` - - +* ID: `password` +* Datatype: `password` +* Default Value: `None` ### Path The currently selected path within your SSH instance. Auto-completion starts from user home folder, use '..' for parent directory or '/' for root directory. -- ID: `path` -- Datatype: `string` -- Default Value: `None` - +* ID: `path` +* Datatype: `string` +* Default Value: `None` - -### Error handling for missing permissions. 
+### Error handling for missing permissions A choice on how to handle errors concerning the permissions rights.When choosing 'ignore' all files get skipped if the current user has correct permission rights.When choosing 'warning' all files get downloaded however there will be a mention that some of the files are not under the users permissionsif there are any and these get skipped.When choosing 'error' the files will not get downloaded if thereis even a single file the user has no access to. -- ID: `error_handling` -- Datatype: `string` -- Default Value: `error` - - +* ID: `error_handling` +* Datatype: `string` +* Default Value: `error` ### No subfolder When this flag is set, only files from the current directory will be downloaded. -- ID: `no_subfolder` -- Datatype: `boolean` -- Default Value: `false` - - +* ID: `no_subfolder` +* Datatype: `boolean` +* Default Value: `false` ### Regular expression A regular expression used to define which files will get downloaded. -- ID: `regex` -- Datatype: `string` -- Default Value: `^.*$` - - - - +* ID: `regex` +* Datatype: `string` +* Default Value: `^.*$` ## Advanced Parameter -### Maximum amount of workers. +### Maximum amount of workers Determines the amount of workers used for concurrent thread execution of the task. Default is 1, maximum is 32. Note that too many workers can cause a ChannelException. -- ID: `max_workers` -- Datatype: `Long` -- Default Value: `1` - - - +* ID: `max_workers` +* Datatype: `Long` +* Default Value: `1` diff --git a/docs/build/reference/customtask/cmem_plugin_ssh-Execute.md b/docs/build/reference/customtask/cmem_plugin_ssh-Execute.md index afc2dfa75..ca7eebb21 100644 --- a/docs/build/reference/customtask/cmem_plugin_ssh-Execute.md +++ b/docs/build/reference/customtask/cmem_plugin_ssh-Execute.md @@ -15,36 +15,37 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - This workflow task executes commands on a given SSH instance. 
By providing the hostname, username, port and authentication method, you can specify the folder in which the command should be executed in. -#### Input Methods: +#### Input Methods + * **No input:** The command will be executed with no input attached to the plugin. Stdin is non-existent in this case. * **File input:** The command will be executed with the stdin being represented by the files that are connected via the input port of the plugin. This also allows for looping over multiple files executing the same command over them. +#### Output Methods -#### Output Methods: * **Structured process output:** The output will produce entities with its own schema including the stdout and stderr as well as the exit code to confirm the execution of the command. * **File output:** The stdout will be converted into a file a be provided for further use. * **No output:** The output port will be closed. -#### Authentication Methods: +#### Authentication Methods + * **Password:** Only the password will be used for authentication. The private key field is ignored, even if filled. * **Key:** The private key will be used for authentication. If the key is encrypted, the password will be used to decrypt it. -#### Note: +#### Note + * If a connection cannot be established within 20 seconds, a timeout occurs. * Currently supported key types are: RSA, DSS, ECDSA, Ed25519. - ## Parameter @@ -52,114 +53,90 @@ will be used to decrypt it. Hostname to connect to. Usually in the form of an IP address -- ID: `hostname` -- Datatype: `string` -- Default Value: `None` - - +* ID: `hostname` +* Datatype: `string` +* Default Value: `None` ### Port The port on which the connection will be tried on. Default is 22. -- ID: `port` -- Datatype: `Long` -- Default Value: `22` - - +* ID: `port` +* Datatype: `Long` +* Default Value: `22` ### Username The username with which a connection will be instantiated. 
-- ID: `username` -- Datatype: `string` -- Default Value: `None` - - +* ID: `username` +* Datatype: `string` +* Default Value: `None` ### Authentication method The method that is used to connect to the SSH server. -- ID: `authentication_method` -- Datatype: `string` -- Default Value: `password` - - +* ID: `authentication_method` +* Datatype: `string` +* Default Value: `password` ### Private key Your private key to connect via SSH. -- ID: `private_key` -- Datatype: `password` -- Default Value: `None` - - +* ID: `private_key` +* Datatype: `password` +* Default Value: `None` ### Password Depending on your authentication method this will either be used toconnect via password to SSH, or to decrypt the SSH private key -- ID: `password` -- Datatype: `password` -- Default Value: `None` - - +* ID: `password` +* Datatype: `password` +* Default Value: `None` ### Path The currently selected path within your SSH instance. Auto-completion starts from user home folder, use '..' for parent directory or '/' for root directory. -- ID: `path` -- Datatype: `string` -- Default Value: `None` - - +* ID: `path` +* Datatype: `string` +* Default Value: `None` ### Input method Parameter to decide whether files will be used as stdin or no input is needed. If 'File input' is chosen, the input port will open for all entities withthe FileEntitySchema. -- ID: `input_method` -- Datatype: `string` -- Default Value: `None` - - +* ID: `input_method` +* Datatype: `string` +* Default Value: `None` ### Output method Parameter to decide which type of output the user wants. This can be either no output, a structured process output with its own schema, or a file based output -- ID: `output_method` -- Datatype: `string` -- Default Value: `None` - - +* ID: `output_method` +* Datatype: `string` +* Default Value: `None` ### Command The command that will be executed on the SSH instance. When the inputmethod is set to 'File input', the command will be executed over these files. 
-- ID: `command` -- Datatype: `string` -- Default Value: `ls` - - +* ID: `command` +* Datatype: `string` +* Default Value: `ls` ### Timeout A timeout for the executed command. -- ID: `timeout` -- Datatype: `Long` -- Default Value: `0` - - - - +* ID: `timeout` +* Datatype: `Long` +* Default Value: `0` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/cmem_plugin_ssh-List.md b/docs/build/reference/customtask/cmem_plugin_ssh-List.md index a66642d48..a0e898c43 100644 --- a/docs/build/reference/customtask/cmem_plugin_ssh-List.md +++ b/docs/build/reference/customtask/cmem_plugin_ssh-List.md @@ -15,7 +15,6 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - This workflow task generates structured output from a specified SSH instance. By providing the hostname, username, port and authentication method, you can specify the @@ -25,25 +24,27 @@ You can also define a regular expression to include or exclude specific files. There is also an option to prevent files in subfolders from being included. -#### Authentication Methods: +#### Authentication Methods + * **Password:** Only the password will be used for authentication. The private key field is ignored, even if filled. * **Key:** The private key will be used for authentication. If the key is encrypted, the password will be used to decrypt it. -#### Error handling modes: +#### Error handling modes + * **Ignore:** Ignores the permission rights of files and lists them all. Skips folders when there is no correct permission. * **Warning:** Warns the user about files that the user has no permission rights to. Lists all files and skips folder when there is no correct permission. * **Error:** Throws an error when there is a single file or folder with incorrect permission rights. -#### Note: +#### Note + * If a connection cannot be established within 20 seconds, a timeout occurs. * Currently supported key types are: RSA, DSS, ECDSA, Ed25519. 
* Setting the maximum amount of workers to more than 1 may cause a Channel Exception when the amount of files is too large - ## Parameter @@ -51,113 +52,88 @@ the amount of files is too large Hostname to connect to. Usually in the form of an IP address -- ID: `hostname` -- Datatype: `string` -- Default Value: `None` - - +* ID: `hostname` +* Datatype: `string` +* Default Value: `None` ### Port The port on which the connection will be tried on. Default is 22. -- ID: `port` -- Datatype: `Long` -- Default Value: `22` - - +* ID: `port` +* Datatype: `Long` +* Default Value: `22` ### Username The username with which a connection will be instantiated. -- ID: `username` -- Datatype: `string` -- Default Value: `None` - - +* ID: `username` +* Datatype: `string` +* Default Value: `None` ### Authentication method The method that is used to connect to the SSH server. -- ID: `authentication_method` -- Datatype: `string` -- Default Value: `password` - - +* ID: `authentication_method` +* Datatype: `string` +* Default Value: `password` ### Private key Your private key to connect via SSH. -- ID: `private_key` -- Datatype: `password` -- Default Value: `None` - - +* ID: `private_key` +* Datatype: `password` +* Default Value: `None` ### Password Depending on your authentication method this will either be used toconnect via password to SSH, or to decrypt the SSH private key -- ID: `password` -- Datatype: `password` -- Default Value: `None` - - +* ID: `password` +* Datatype: `password` +* Default Value: `None` ### Path The currently selected path within your SSH instance. Auto-completion starts from user home folder, use '..' for parent directory or '/' for root directory. -- ID: `path` -- Datatype: `string` -- Default Value: `None` - +* ID: `path` +* Datatype: `string` +* Default Value: `None` - -### Error handling for missing permissions. 
+### Error handling for missing permissions A choice on how to handle errors concerning the permissions rights.When choosing 'ignore' all files get listed regardless if the current user has correct permission rightsWhen choosing 'warning' all files get listed however there will be a mention that some of the files are not under the users permissionsif there are anyWhen choosing 'error' the files will not get listed if therethere are files the user has no access to. -- ID: `error_handling` -- Datatype: `string` -- Default Value: `error` - - +* ID: `error_handling` +* Datatype: `string` +* Default Value: `error` ### No subfolder When this flag is set, only files from the current directory will be listed. -- ID: `no_subfolder` -- Datatype: `boolean` -- Default Value: `false` - - +* ID: `no_subfolder` +* Datatype: `boolean` +* Default Value: `false` ### Regular expression A regular expression used to define which files will get listed. -- ID: `regex` -- Datatype: `string` -- Default Value: `^.*$` - - - - +* ID: `regex` +* Datatype: `string` +* Default Value: `^.*$` ## Advanced Parameter -### Maximum amount of workers. +### Maximum amount of workers Determines the amount of workers used for concurrent thread execution of the task. Default is 1, maximum is 32. Note that too many workers can cause a ChannelException. -- ID: `max_workers` -- Datatype: `Long` -- Default Value: `1` - - - +* ID: `max_workers` +* Datatype: `Long` +* Default Value: `1` diff --git a/docs/build/reference/customtask/cmem_plugin_ssh-Upload.md b/docs/build/reference/customtask/cmem_plugin_ssh-Upload.md index 580e16f43..8181dcb5a 100644 --- a/docs/build/reference/customtask/cmem_plugin_ssh-Upload.md +++ b/docs/build/reference/customtask/cmem_plugin_ssh-Upload.md @@ -15,22 +15,22 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - This workflow task uploads files to a given SSH instance. 
By providing the hostname, username, port and authentication method, you can specify the folder the data should be uploaded to. -#### Authentication Methods: +#### Authentication Methods + * **Password:** Only the password will be used for authentication. The private key field is ignored, even if filled. * **Key:** The private key will be used for authentication. If the key is encrypted, the password will be used to decrypt it. -#### Note: +#### Note + * If a connection cannot be established within 20 seconds, a timeout occurs. * Currently supported key types are: RSA, DSS, ECDSA, Ed25519. - ## Parameter @@ -38,74 +38,58 @@ will be used to decrypt it. Hostname to connect to. Usually in the form of an IP address -- ID: `hostname` -- Datatype: `string` -- Default Value: `None` - - +* ID: `hostname` +* Datatype: `string` +* Default Value: `None` ### Port The port on which the connection will be tried on. Default is 22. -- ID: `port` -- Datatype: `Long` -- Default Value: `22` - - +* ID: `port` +* Datatype: `Long` +* Default Value: `22` ### Username The username with which a connection will be instantiated. -- ID: `username` -- Datatype: `string` -- Default Value: `None` - - +* ID: `username` +* Datatype: `string` +* Default Value: `None` ### Authentication method The method that is used to connect to the SSH server. -- ID: `authentication_method` -- Datatype: `string` -- Default Value: `password` - - +* ID: `authentication_method` +* Datatype: `string` +* Default Value: `password` ### Private key Your private key to connect via SSH. 
-- ID: `private_key` -- Datatype: `password` -- Default Value: `None` - - +* ID: `private_key` +* Datatype: `password` +* Default Value: `None` ### Password Depending on your authentication method this will either be used toconnect via password to SSH, or to decrypt the SSH private key -- ID: `password` -- Datatype: `password` -- Default Value: `None` - - +* ID: `password` +* Datatype: `password` +* Default Value: `None` ### Path The currently selected path within your SSH instance. Auto-completion starts from user home folder, use '..' for parent directory or '/' for root directory. -- ID: `path` -- Datatype: `string` -- Default Value: `None` - - - - +* ID: `path` +* Datatype: `string` +* Default Value: `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/cmem_plugin_validation-validate-ValidateEntities.md b/docs/build/reference/customtask/cmem_plugin_validation-validate-ValidateEntities.md index 6fcaeecb8..7c40a774e 100644 --- a/docs/build/reference/customtask/cmem_plugin_validation-validate-ValidateEntities.md +++ b/docs/build/reference/customtask/cmem_plugin_validation-validate-ValidateEntities.md @@ -53,29 +53,20 @@ The task can either: The error handling behavior is configurable through the `Fail on violations` parameter. - ## Parameter ### Source / Input Mode - - - ID: `source_mode` - Datatype: `string` - Default Value: `entities` - - ### Target / Output Mode - - - ID: `target_mode` - Datatype: `string` - Default Value: `entities` - - ### JSON Schema Dataset This dataset holds the JSON schema to use for validation. @@ -84,8 +75,6 @@ This dataset holds the JSON schema to use for validation. - Datatype: `string` - Default Value: `None` - - ### Fail on violations If enabled, the task will fail on the first data violation. @@ -94,10 +83,6 @@ If enabled, the task will fail on the first data violation. 
- Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter ### Source JSON Dataset @@ -108,8 +93,6 @@ This dataset holds the resources you want to validate. - Datatype: `string` - Default Value: `None` - - ### Target JSON Dataset This dataset will be used to store the valid JSON objects after validation. @@ -117,6 +100,3 @@ This dataset will be used to store the valid JSON objects after validation. - ID: `target_dataset` - Datatype: `string` - Default Value: `None` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_validation-validate-ValidateGraph.md b/docs/build/reference/customtask/cmem_plugin_validation-validate-ValidateGraph.md index ee12d181b..b7664857c 100644 --- a/docs/build/reference/customtask/cmem_plugin_validation-validate-ValidateGraph.md +++ b/docs/build/reference/customtask/cmem_plugin_validation-validate-ValidateGraph.md @@ -15,11 +15,9 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - Start a graph validation process which verifies, that resources in a specific graph are valid according to the node shapes in a shape catalog graph. - ## Parameter ### Context Graph @@ -30,8 +28,6 @@ This graph holds the resources you want to validate. - Datatype: `string` - Default Value: `None` - - ### Shape graph This graph holds the shapes you want to use for validation. @@ -40,8 +36,6 @@ This graph holds the shapes you want to use for validation. - Datatype: `string` - Default Value: `https://vocab.eccenca.com/shacl/` - - ### Result graph In this graph, the validation results are materialized. If left empty, results are not materialized. @@ -50,40 +44,24 @@ In this graph, the validation results are materialized. 
If left empty, results a - Datatype: `string` - Default Value: `None` - - ### Clear result graph before validation - - - ID: `clear_result_graph` - Datatype: `boolean` - Default Value: `false` - - ### Fail workflow on violations - - - ID: `fail_on_violations` - Datatype: `boolean` - Default Value: `false` - - ### Output violations as entities - - - ID: `output_results` - Datatype: `boolean` - Default Value: `true` - - - - ## Advanced Parameter ### Resource Selection Query @@ -93,12 +71,10 @@ The query to select the resources to validate. Use {{context_graph}} as a placeh - ID: `sparql_query` - Datatype: `code-sparql` - Default Value: + ``` sparql SELECT DISTINCT ?resource FROM <{{context_graph}}> WHERE { ?resource a ?class . FILTER isIRI(?resource) } ``` - - - diff --git a/docs/build/reference/customtask/cmem_plugin_wfreports_get_report.md b/docs/build/reference/customtask/cmem_plugin_wfreports_get_report.md index 82272ed37..c8c6897d8 100644 --- a/docs/build/reference/customtask/cmem_plugin_wfreports_get_report.md +++ b/docs/build/reference/customtask/cmem_plugin_wfreports_get_report.md @@ -15,7 +15,6 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - This workflow operator retrieves the most recent execution report of a specified workflow and outputs it as a JSON file. 
@@ -30,26 +29,20 @@ The plugin outputs a single JSON file entity containing the complete workflow ex ## Usage This operator is useful for: + - Monitoring workflow execution results - Creating audit trails of workflow runs - Archiving execution reports for compliance purposes - Feeding execution data into downstream analysis tasks - ## Parameter ### Workflow - - - ID: `workflow_id` - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/cmem_plugin_yaml-parse.md b/docs/build/reference/customtask/cmem_plugin_yaml-parse.md index d655a2703..6d6d81f1f 100644 --- a/docs/build/reference/customtask/cmem_plugin_yaml-parse.md +++ b/docs/build/reference/customtask/cmem_plugin_yaml-parse.md @@ -15,16 +15,17 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - This workflow task parses YAML content from multiple sources and converts it to various output formats. **Input Sources:** + - **entities**: Parse YAML from input port entities in a workflow - **code**: Parse YAML from directly entered source code - **file**: Parse YAML from uploaded project file resources **Output Formats:** + - **entities**: Convert parsed structure to entities for workflow processing - **json_entities**: Output as single JSON entity to the output port - **json_dataset**: Save parsed structure directly to a JSON dataset @@ -33,41 +34,26 @@ The plugin provides flexible YAML-to-JSON conversion with configurable input sch types and paths for entity-based processing. It includes comprehensive validation and error handling for all supported modes. 
- ## Parameter ### Source / Input Mode - - - ID: `source_mode` - Datatype: `string` - Default Value: `code` - - ### Target / Output Mode - - - ID: `target_mode` - Datatype: `string` - Default Value: `entities` - - ### YAML Source Code (when using the *code* input) - - - ID: `source_code` - Datatype: `code-yaml` - Default Value: `# Add your YAML code here (and select 'code' as input mode).` - - - - ## Advanced Parameter ### YAML File (when using the *file* input) @@ -78,8 +64,6 @@ Which YAML file do you want to load into a JSON dataset? The dropdown shows file - Datatype: `string` - Default Value: `None` - - ### Target Dataset Where do you want to save the result of the conversion? The dropdown shows JSON datasets from the current project. @@ -88,8 +72,6 @@ Where do you want to save the result of the conversion? The dropdown shows JSON - Datatype: `string` - Default Value: `None` - - ### Input Schema Type / Class In case of source mode 'entities', you can specify the requested input type. @@ -98,8 +80,6 @@ In case of source mode 'entities', you can specify the requested input type. - Datatype: `string` - Default Value: `urn:x-eccenca:yaml-document` - - ### Input Schema Path / Property In case of source mode 'entities', you can specify the requested input path. @@ -107,6 +87,3 @@ In case of source mode 'entities', you can specify the requested input path. - ID: `input_schema_path` - Datatype: `string` - Default Value: `text` - - - diff --git a/docs/build/reference/customtask/combine-csv.md b/docs/build/reference/customtask/combine-csv.md index a94d12b95..2c428c125 100644 --- a/docs/build/reference/customtask/combine-csv.md +++ b/docs/build/reference/customtask/combine-csv.md @@ -28,8 +28,6 @@ Regular expression for filtering resources of the project. - Datatype: `string` - Default Value: `None` - - ### Delimiter Delimiter in the input CSV files. @@ -38,8 +36,6 @@ Delimiter in the input CSV files. 
- Datatype: `string` - Default Value: `,` - - ### Quotechar Quotechar in the input CSV files. @@ -48,8 +44,6 @@ Quotechar in the input CSV files. - Datatype: `string` - Default Value: `"` - - ### Skip rows The number of rows to skip before the header row. @@ -58,8 +52,6 @@ The number of rows to skip before the header row. - Datatype: `Long` - Default Value: `0` - - ### Stop workflow if result is empty Stop the workflow if no input files are found or all input files are empty. @@ -68,10 +60,6 @@ Stop the workflow if no input files are found or all input files are empty. - Datatype: `boolean` - Default Value: `true` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/deleteProjectFiles.md b/docs/build/reference/customtask/deleteProjectFiles.md index 4b5c562c9..dd8172137 100644 --- a/docs/build/reference/customtask/deleteProjectFiles.md +++ b/docs/build/reference/customtask/deleteProjectFiles.md @@ -8,8 +8,6 @@ tags: # Delete project files - - Removes file resources from the project based on a regular expression (regex). The project-relative path of each file of the current project is tested against a user given regular expression and the file is deleted if the expression matches this name. The file names include the sub-directory structure if present but do not start with a `/`. The regular expression has to match the full path of the file and is case sensitive. @@ -33,7 +31,6 @@ Here are some regular expressions with the expected result: We recommend testing your regular expression before using it. [regex101.com](https://regex101.com) is a nice service to test your regular expressions. [This deep-link](https://regex101.com/?testString=dataset.csv%0Amy-dataset.xml%0Ajson/example.json%0Ajson/example_new.json%0Ajson/data.xml®ex=.*new.*) provides a test bed using the example files and the last expression from the list. 
- ## Parameter ### File matching regex @@ -44,8 +41,6 @@ The regex for filtering the file names. The regex needs to match the full path ( - Datatype: `string` - Default Value: `None` - - ### Output deleted files If enabled the operator outputs entities, one entity for each deleted file, with the path of the file as attribute 'filePath'. @@ -54,10 +49,6 @@ If enabled the operator outputs entities, one entity for each deleted file, with - Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/downloadFile.md b/docs/build/reference/customtask/downloadFile.md index 518333194..b9c542c9e 100644 --- a/docs/build/reference/customtask/downloadFile.md +++ b/docs/build/reference/customtask/downloadFile.md @@ -8,11 +8,8 @@ tags: # Download file - - Downloads a file from a given URL. - ## Parameter ### URL @@ -23,8 +20,6 @@ The URL of the file to be downloaded. - Datatype: `string` - Default Value: `None` - - ### Accept The accept header String. @@ -33,8 +28,6 @@ The accept header String. - Datatype: `string` - Default Value: `None` - - ### Request timeout Request timeout in ms. The overall maximum time the request should take. @@ -43,8 +36,6 @@ Request timeout in ms. The overall maximum time the request should take. - Datatype: `int` - Default Value: `10000` - - ### Connection timeout Connection timeout in ms. The time until which a connection with the remote end must be established. @@ -53,8 +44,6 @@ Connection timeout in ms. The time until which a connection with the remote end - Datatype: `int` - Default Value: `5000` - - ### Read timeout Read timeout in ms. The max. time a request stays idle, i.e. no data is send or received. @@ -63,8 +52,6 @@ Read timeout in ms. The max. time a request stays idle, i.e. no data is send or - Datatype: `int` - Default Value: `10000` - - ### HTTP headers Configure additional HTTP headers. One header per line. 
Each header entry follows the curl syntax. @@ -73,8 +60,6 @@ Configure additional HTTP headers. One header per line. Each header entry follow - Datatype: `multiline string` - Default Value: `None` - - ### Authorization header The authorization header. This is usually either 'Authorization' or 'Proxy-Authorization'If left empty, no authorization header is sent. @@ -83,8 +68,6 @@ The authorization header. This is usually either 'Authorization' or 'Proxy-Autho - Datatype: `string` - Default Value: `None` - - ### Authorization header value The authorization header value. Usually this has the form 'type secret', e.g. for OAuth 'bearer .'This config parameter will be encrypted in the backend. @@ -93,10 +76,6 @@ The authorization header value. Usually this has the form 'type secret', e.g. fo - Datatype: `password` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/eccencaDataPlatformGraphStoreFileUploadOperator.md b/docs/build/reference/customtask/eccencaDataPlatformGraphStoreFileUploadOperator.md index d75f756ad..0cb4b7c41 100644 --- a/docs/build/reference/customtask/eccencaDataPlatformGraphStoreFileUploadOperator.md +++ b/docs/build/reference/customtask/eccencaDataPlatformGraphStoreFileUploadOperator.md @@ -8,11 +8,8 @@ tags: # Upload File to Knowledge Graph - - Uploads an N-Triples or Turtle (limited support) file from the file repository to a 'Knowledge Graph' dataset. The output of this operatorcan be the input of datasets that support graph store file upload, e.g. 'Knowledge Graph'. The file will be uploaded to the graph specified in that dataset. - ## Parameter ### RDF resource @@ -23,8 +20,6 @@ RDF file (N-Triples or Turtle) from the resource repository that should be uploa - Datatype: `resource` - Default Value: `None` - - ### Max chunk size (MB) The N-Triples file will be split into multiple chunks if the file size exceeds the max chunk size. 
For Turtle files this parameter is ignored since no chunking is supported. @@ -33,8 +28,6 @@ The N-Triples file will be split into multiple chunks if the file size exceeds t - Datatype: `option[int]` - Default Value: `None` - - ### Content type The MIME type of the serialization format of the RDF file. @@ -43,10 +36,6 @@ The MIME type of the serialization format of the RDF file. - Datatype: `enumeration` - Default Value: `application/n-triples` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/eccencaRestOperator.md b/docs/build/reference/customtask/eccencaRestOperator.md index cc2adb2e6..77a6a942d 100644 --- a/docs/build/reference/customtask/eccencaRestOperator.md +++ b/docs/build/reference/customtask/eccencaRestOperator.md @@ -8,12 +8,10 @@ tags: # Execute REST requests - - ## Core parameter overview - `URL`: The URL the request will be executed against. This value can be overwritten at execution time when the 'Read parameters from input' option - is enabled. This value will also be adapted when a paging approach is configured, see the paging section for more details. + is enabled. This value will also be adapted when a paging approach is configured, see the paging section for more details. - `Method`: One of the following HTTP methods: GET, POST, PUT, PATCH or DELETE. - `Accept`: The ACCEPT header value for content negotiation, e.g. 'application/json'. - `Content type`: The CONTENT-TYPE header value. This is usually used for POST, PUT or PATCH requests when the API endpoint @@ -72,6 +70,7 @@ to fetch all results. This is currently only supported for JSON requests. } } ``` + - `Next page ID query parameter`: If the paging method is 'Next page identifier', this defines the query parameter name that should be attached to the original request URL in combination with the 'next page' value of the current response in order to request the next page. @@ -79,6 +78,7 @@ to fetch all results. 
This is currently only supported for JSON requests. ## Setting HTTP headers - `HTTP headers`: This parameter allows to set HTTP headers of the request being made. Each line of the multi-line value should contain a single header, e.g. + ``` Accept-Language: en-US,en;q=0.5 Cache-Control: max-age=0 @@ -104,7 +104,7 @@ a single, merged file (only supported for JSON) or to a ZIP archive, i.e. a file In the latter case an entry per request is added to the ZIP file. Currently, the following datasets support the processing of ZIP files: JSON, XML, CSV and RDF file. -- `Output result as file`: If enabled, instead of outputting a single entity, the result/s will be written directly +- `Output result as file`: If enabled, instead of outputting a single entity, the result/s will be written directly to the file of the file-based dataset that is connected to the output of this operator. If the option 'Read parameters from input' is enabled, it is currently always assumed that multiple requests will be sent. @@ -136,7 +136,7 @@ Following parameters can be tuned in order to decide when an execution should be request configuration as failed. Default: `3` - `Abort when request fails`: When enabled, if a single request configuration eventually fails, i.e. it reaches its max. retry count, the overall execution of the REST operator will fail. -- `Max failed requests`: If set to a value greater 0, the execution will abort if more than the given number of request configurations +- `Max failed requests`: If set to a value greater 0, the execution will abort if more than the given number of request configurations have failed (reached max. retries). This can be used if a number of failed requests can be tolerated. When 'Abort when request fails' is enabled, this option is ignored. @@ -148,7 +148,6 @@ If having the request URL in the response data is needed, following parameter ne a property with the specified name in the root level of the response JSON object. 
This is mostly relevant if the request URL cannot be re-constructed from the response data. Only supported for JSON responses. - ## Parameter ### URL @@ -159,8 +158,6 @@ The URL to execute this request against. This can be overwritten at execution ti - Datatype: `string` - Default Value: `None` - - ### Method One of the following HTTP methods: GET, POST, PUT, PATCH or DELETE. @@ -169,8 +166,6 @@ One of the following HTTP methods: GET, POST, PUT, PATCH or DELETE. - Datatype: `enumeration` - Default Value: `GET` - - ### Accept The accept header String. @@ -179,8 +174,6 @@ The accept header String. - Datatype: `string` - Default Value: `None` - - ### Request timeout Request timeout in ms. The overall maximum time the request should take. @@ -189,8 +182,6 @@ Request timeout in ms. The overall maximum time the request should take. - Datatype: `int` - Default Value: `10000` - - ### Connection timeout Connection timeout in ms. The time until which a connection with the remote end must be established. @@ -199,8 +190,6 @@ Connection timeout in ms. The time until which a connection with the remote end - Datatype: `int` - Default Value: `5000` - - ### Read timeout Read timeout in ms. The max. time a request stays idle, i.e. no data is send or received. @@ -209,8 +198,6 @@ Read timeout in ms. The max. time a request stays idle, i.e. no data is send or - Datatype: `int` - Default Value: `10000` - - ### Content type The content-type header String. This can be set in case of PUT or POST. If another content type comes back, the task will fail. @@ -219,8 +206,6 @@ The content-type header String. This can be set in case of PUT or POST. If anoth - Datatype: `string` - Default Value: `None` - - ### Content The content that is send with a POST, PUT or PATCH request. For handling this payload dynamically this parameter must be overwritten via the task input. @@ -229,8 +214,6 @@ The content that is send with a POST, PUT or PATCH request. 
For handling this pa - Datatype: `string` - Default Value: `None` - - ### HTTP headers Configure additional HTTP headers. One header per line. Each header entry follows the curl syntax. @@ -239,8 +222,6 @@ Configure additional HTTP headers. One header per line. Each header entry follow - Datatype: `multiline string` - Default Value: `None` - - ### Read parameters from input If this is set to true, specific parameters can be overwritten at execution time and one request per overwrite config will be executed. Else inputs are ignored and exactly one request will be executed. Parameters that can currently be overwritten: url, content @@ -249,8 +230,6 @@ If this is set to true, specific parameters can be overwritten at execution time - Datatype: `boolean` - Default Value: `false` - - ### Multi-part file parameter If set to a non-empty String then instead of a normal POST a multipart/form-data file upload request is executed. This value is used as the form parameter name. @@ -259,8 +238,6 @@ If set to a non-empty String then instead of a normal POST a multipart/form-data - Datatype: `string` - Default Value: `None` - - ### Authorization header The authorization header. This is usually either 'Authorization' or 'Proxy-Authorization'If left empty, no authorization header is sent. @@ -269,8 +246,6 @@ The authorization header. This is usually either 'Authorization' or 'Proxy-Autho - Datatype: `string` - Default Value: `None` - - ### Authorization header value The authorization header value. Usually this has the form 'type secret', e.g. for OAuth 'bearer .'This config parameter will be encrypted in the backend. @@ -279,8 +254,6 @@ The authorization header value. Usually this has the form 'type secret', e.g. fo - Datatype: `password` - Default Value: `None` - - ### Delay between requests The delay between requests in milliseconds. @@ -289,8 +262,6 @@ The delay between requests in milliseconds. 
- Datatype: `int` - Default Value: `0` - - ### Retries per request How often should a single request be retried if it fails. @@ -299,8 +270,6 @@ How often should a single request be retried if it fails. - Datatype: `int` - Default Value: `3` - - ### Abort when request fails If a single request fails, i.e. it reaches its max. retry count, should the execution then be aborted or the next requests be executed. @@ -309,8 +278,6 @@ If a single request fails, i.e. it reaches its max. retry count, should the exec - Datatype: `boolean` - Default Value: `true` - - ### Limit If this is set to a number greater 0, then only this number of input REST configurations will be executed. Mainly used for debugging and executing a subset. @@ -319,8 +286,6 @@ If this is set to a number greater 0, then only this number of input REST config - Datatype: `int` - Default Value: `0` - - ### Offset How many input entries to skip. @@ -329,8 +294,6 @@ How many input entries to skip. - Datatype: `int` - Default Value: `0` - - ### Max failed requests If set to greater 0, then the execution will abort if more than the given number of requests have failed. This should be used to fail early. If 'abort on request fail' is set to true, then this option has no effect. @@ -339,8 +302,6 @@ If set to greater 0, then the execution will abort if more than the given number - Datatype: `int` - Default Value: `0` - - ### Paging method There are two paging methods currently supported: 1. Next page full URL: The JSON response contains the full URL of the next page. This URL will be used for the subsequent request. 2. Next page identifier: The JSON response contains the ID of the next page. This ID will be used as query parameter for the subsequent request. In both cases the path to the next page value in the response JSON must be defined via the 'Next page JSON path' parameter. In case of the 'Identifier next page parameter' paging method, also the parameter 'Next page ID query parameter' must be set. 
@@ -349,8 +310,6 @@ There are two paging methods currently supported: 1. Next page full URL: The JSO - Datatype: `enumeration` - Default Value: `none` - - ### Next page JSON path The path to the JSON value containing the next page value of the JSON response, e.g. paging/next. The path syntax follows the Silk path syntax, but only allows forward paths. @@ -359,8 +318,6 @@ The path to the JSON value containing the next page value of the JSON response, - Datatype: `string` - Default Value: `None` - - ### Next page ID query parameter The query parameter name for the next page ID that should be attached to the next page URI request. This is necessary for the 'Next page identifier' paging method. @@ -369,8 +326,6 @@ The query parameter name for the next page ID that should be attached to the nex - Datatype: `string` - Default Value: `None` - - ### Output result as file If a file based dataset is connected to the output of the REST operator, then this option can be enabled in order to overwrite the file resource of the connected dataset. This allows for handling the result of the REST request/s as a normal dataset. If a non-file based dataset is connected to this operator the execution will fail. If disabled, a single entity with a single property 'result' will be output that contains the (merged) result. @@ -379,8 +334,6 @@ If a file based dataset is connected to the output of the REST operator, then th - Datatype: `boolean` - Default Value: `false` - - ### URL property If this is non-empty, a property is created in the root JSON object (if it exists) with the same name that has the request URL as value. This is mostly relevant if the request URL cannot be re-constructed from the response data. Only supported for JSON response data. 
@@ -389,10 +342,6 @@ If this is non-empty, a property is created in the root JSON object (if it exist - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/getProjectFiles.md b/docs/build/reference/customtask/getProjectFiles.md index 430afc5f3..741a7f6eb 100644 --- a/docs/build/reference/customtask/getProjectFiles.md +++ b/docs/build/reference/customtask/getProjectFiles.md @@ -8,11 +8,8 @@ tags: # Get project files - - Get file resources from the project. - ## Parameter ### File name @@ -23,8 +20,6 @@ The path of the project file to retrieve. Leave empty if the file regex paramete - Datatype: `string` - Default Value: `None` - - ### Files regex Optional regular expression for retrieving files. The regex needs to match the full path (i.e. from beginning to end, including sub-directories). @@ -33,8 +28,6 @@ Optional regular expression for retrieving files. The regex needs to match the f - Datatype: `string` - Default Value: `None` - - ### MIME type Optional MIME type to assign to all retrieved files. If left empty, the MIME type will be not be set. @@ -43,10 +36,6 @@ Optional MIME type to assign to all retrieved files. If left empty, the MIME typ - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/setParameters.md b/docs/build/reference/customtask/setParameters.md index d6e7a395b..549105168 100644 --- a/docs/build/reference/customtask/setParameters.md +++ b/docs/build/reference/customtask/setParameters.md @@ -8,11 +8,8 @@ tags: # Set parameters - - Set and overwrite parameters of a task. - ## Parameter ### Output Task @@ -23,8 +20,6 @@ The output task for key autocompletion - Datatype: `option[identifier]` - Default Value: `None` - - ### Parameters The parameters to set. @@ -33,10 +28,6 @@ The parameters to set. 
- Datatype: `keyValuePairs` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/shacl-pyshacl.md b/docs/build/reference/customtask/shacl-pyshacl.md index e2da94a47..ad2c264a6 100644 --- a/docs/build/reference/customtask/shacl-pyshacl.md +++ b/docs/build/reference/customtask/shacl-pyshacl.md @@ -27,8 +27,6 @@ The URI of the graph to be validated. The graph URI is selected from a list of g - Datatype: `string` - Default Value: `None` - - ### SHACL shapes graph URI The URI of the graph containing the SHACL shapes to be validated against. The graph URI is selected from a list of graphs of type `shui:ShapeCatalog`. @@ -37,8 +35,6 @@ The URI of the graph containing the SHACL shapes to be validated against. The gr - Datatype: `string` - Default Value: `None` - - ### Generate validation graph If enabled, the validation graph is posted to the CMEM instance with the graph URI specified with the `Validation graph URI` option. @@ -47,8 +43,6 @@ If enabled, the validation graph is posted to the CMEM instance with the graph U - Datatype: `boolean` - Default Value: `false` - - ### Validation graph URI If the `Generate validation graph` option is enabled the validation graph is posted to the CMEM instance with this graph URI. @@ -57,8 +51,6 @@ If the `Generate validation graph` option is enabled the validation graph is pos - Datatype: `string` - Default Value: `None` - - ### Output entities If enabled, the plugin outputs the validation results as entities and can be connected to, for instance, a CSV dataset to produce a results table. @@ -67,8 +59,6 @@ If enabled, the plugin outputs the validation results as entities and can be con - Datatype: `boolean` - Default Value: `false` - - ### Clear validation graph If enabled, the validation graph is cleared before workflow execution. @@ -77,10 +67,6 @@ If enabled, the validation graph is cleared before workflow execution. 
- Datatype: `boolean` - Default Value: `true` - - - - ## Advanced Parameter ### Ontology graph URI @@ -91,8 +77,6 @@ The URI of a graph containing extra ontological information. RDFS and OWL defini - Datatype: `string` - Default Value: `None` - - ### Resolve owl:imports If enabled, the graph tree defined with owl:imports in the data graph is resolved. @@ -101,8 +85,6 @@ If enabled, the graph tree defined with owl:imports in the data graph is resolve - Datatype: `boolean` - Default Value: `true` - - ### Blank node skolemization If enabled, blank nodes in the validation graph are skolemized into URIs. @@ -111,8 +93,6 @@ If enabled, blank nodes in the validation graph are skolemized into URIs. - Datatype: `boolean` - Default Value: `true` - - ### Add labels If enabled, `rdfs:label` triples are added to the validation graph for instances of `sh:ValidationReport` and `sh:ValidationResult`. @@ -121,8 +101,6 @@ If enabled, `rdfs:label` triples are added to the validation graph for instances - Datatype: `boolean` - Default Value: `true` - - ### Add labels to focus nodes and values If enabled along with the `Add labels` option, `rdfs:label` triples are added for the focus nodes, values and SHACL shapes in the validation graph. The labels are taken from the specified data and SHACL graphs. @@ -131,9 +109,7 @@ If enabled along with the `Add labels` option, `rdfs:label` triples are added fo - Datatype: `boolean` - Default Value: `false` - - -### Add shui:conforms flag to focus node resources. +### Add shui:conforms flag to focus node resources If enabled, `shui:conforms false` triples are added to the focus nodes in the validation graph. @@ -141,8 +117,6 @@ If enabled, `shui:conforms false` triples are added to the focus nodes in the va - Datatype: `boolean` - Default Value: `false` - - ### Meta-SHACL If enabled, the SHACL shapes graph is validated against the SHACL-SHACL shapes graph before validating the data graph. 
@@ -151,8 +125,6 @@ If enabled, the SHACL shapes graph is validated against the SHACL-SHACL shapes g - Datatype: `boolean` - Default Value: `false` - - ### Inference If enabled, OWL inferencing expansion of the data graph is performed before validation. Options are RDFS, OWLRL, Both, None. @@ -161,8 +133,6 @@ If enabled, OWL inferencing expansion of the data graph is performed before vali - Datatype: `string` - Default Value: `none` - - ### SHACL advanced features Enable SHACL advanced features. @@ -171,8 +141,6 @@ Enable SHACL advanced features. - Datatype: `boolean` - Default Value: `false` - - ### SHACL-JS features Enable SHACL-JS features. @@ -181,9 +149,7 @@ Enable SHACL-JS features. - Datatype: `boolean` - Default Value: `false` - - -### Remove graph type http://rdfs.org/ns/void#Dataset from data graph +### Remove graph type from data graph Before validating, remove the triple ` a ` from the in-memory data graph. @@ -191,9 +157,7 @@ Before validating, remove the triple ` a from data graph Before validating, remove the triple ` a ` from the in-memory data graph. @@ -201,9 +165,7 @@ Before validating, remove the triple ` a from data graph Before validating, remove the triple ` a ` from the in-memory data graph. @@ -211,8 +173,6 @@ Before validating, remove the triple ` a - - A task that executes a SPARQL Construct query on a SPARQL enabled data source and outputs the SPARQL result. If the result should be written to the same RDF store it is read from, the SPARQL Update operator is preferable. 
- ## Parameter ### Construct query @@ -23,8 +20,6 @@ A SPARQL 1.1 construct query - Datatype: `code-sparql` - Default Value: `None` - - ### Use temporary file When copying directly to the same SPARQL Endpoint or when copying large amounts of triples, set to True by default @@ -33,10 +28,6 @@ When copying directly to the same SPARQL Endpoint or when copying large amounts - Datatype: `boolean` - Default Value: `true` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/sparqlSelectOperator.md b/docs/build/reference/customtask/sparqlSelectOperator.md index 976d1db42..1d14427aa 100644 --- a/docs/build/reference/customtask/sparqlSelectOperator.md +++ b/docs/build/reference/customtask/sparqlSelectOperator.md @@ -8,8 +8,6 @@ tags: # SPARQL Select query - - The SPARQL SELECT plugin is a task for executing SPARQL SELECT queries on the input RDF data source. ## Description @@ -41,7 +39,6 @@ Other types of RDF tasks are the `sparqlCopyOperator` for executing SPARQL CONST Regarding the input dataset, any RDF dataset is acceptable. For further details on the RDF datasets, see for example the documentation of the `sparqlEndpoint` plugin. - ## Parameter ### Select query @@ -52,8 +49,6 @@ A SPARQL 1.1 select query - Datatype: `code-sparql` - Default Value: `None` - - ### Result limit If set to a positive integer, the number of results is limited @@ -62,8 +57,6 @@ If set to a positive integer, the number of results is limited - Datatype: `string` - Default Value: `None` - - ### Optional SPARQL dataset An optional SPARQL dataset that can be used for example data, so e.g. the transformation editor shows mapping examples. @@ -72,8 +65,6 @@ An optional SPARQL dataset that can be used for example data, so e.g. the transf - Datatype: `SPARQL endpoint` - Default Value: `None` - - ### SPARQL query timeout (ms) SPARQL query timeout (select/update) in milliseconds. A value of zero means that there is no timeout set explicitly. 
If a value greater zero is specified this overwrites possible default timeouts. @@ -82,10 +73,6 @@ SPARQL query timeout (select/update) in milliseconds. A value of zero means that - Datatype: `int` - Default Value: `0` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/sparqlUpdateOperator.md b/docs/build/reference/customtask/sparqlUpdateOperator.md index 67d222a3b..e0ec0d1ca 100644 --- a/docs/build/reference/customtask/sparqlUpdateOperator.md +++ b/docs/build/reference/customtask/sparqlUpdateOperator.md @@ -8,8 +8,6 @@ tags: # SPARQL Update query - - The SPARQL UPDATE query plugin is a task for outputting SPARQL UPDATE queries from the input RDF data source. ## Description @@ -65,7 +63,7 @@ The methods `uri`, `plainLiteral` and `rawUnsafe` throw an exception if no input In addition to input values, properties of the input and output tasks can be accessed via the `inputProperties` and `outputProperties` objects in the same way as the `row` object. For example with `$inputProperties.uri("graph")`. -For more information about the Velocity Engine, visit http://velocity.apache.org. +For more information about the Velocity Engine, visit . ### Internal Specifics @@ -79,7 +77,6 @@ possible output datasets could be an **in-memory dataset** or a **Knowledge Grap `eccencaDataPlatform` plugin, which is the flagship RDF dataset of [Corporate Memory](https://eccenca.com/products/enterprise-knowledge-graph-platform-corporate-memory). - ## Parameter ### SPARQL update query @@ -90,8 +87,6 @@ The SPARQL UPDATE template for constructing SPARQL UPDATE queries for every enti - Datatype: `code-sparql` - Default Value: `None` - - ### Batch size How many entities should be handled in a single update request. @@ -100,8 +95,6 @@ How many entities should be handled in a single update request. - Datatype: `int` - Default Value: `1` - - ### Templating mode The templating mode for the template engine. 
The possible values are `Simple` and `Velocity Engine`. See the general documentation of this plugin for further details on the features of each template engine. @@ -110,10 +103,6 @@ The templating mode for the template engine. The possible values are `Simple` an - Datatype: `enumeration` - Default Value: `simple` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/sqlUpdateQueryOperator.md b/docs/build/reference/customtask/sqlUpdateQueryOperator.md index b54d27c9b..d449b3a11 100644 --- a/docs/build/reference/customtask/sqlUpdateQueryOperator.md +++ b/docs/build/reference/customtask/sqlUpdateQueryOperator.md @@ -8,12 +8,9 @@ tags: # SQL Update query - - A task that outputs SQL queries. The output of this operator should be connected to a remote SQL endpoint on which queries should be executed. - ## Parameter ### SQL query @@ -24,10 +21,6 @@ The SQL query to be executed - Datatype: `code-sql` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/tripleRequestOperator.md b/docs/build/reference/customtask/tripleRequestOperator.md index bc30f9df7..4ca2d983b 100644 --- a/docs/build/reference/customtask/tripleRequestOperator.md +++ b/docs/build/reference/customtask/tripleRequestOperator.md @@ -8,15 +8,12 @@ tags: # Request RDF triples - - A task that requests all triples from an RDF dataset. 
- ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/ucumNormalizationTask.md b/docs/build/reference/customtask/ucumNormalizationTask.md index 729d7f921..dc24f6c42 100644 --- a/docs/build/reference/customtask/ucumNormalizationTask.md +++ b/docs/build/reference/customtask/ucumNormalizationTask.md @@ -8,8 +8,6 @@ tags: # Normalize units of measurement - - This custom task substitutes numeric values and pertaining units by its normalized representation in the International System of Units (SI). The resulting representation consists of the following three columns: @@ -18,7 +16,6 @@ The resulting representation consists of the following three columns: 2. The unit symbol of the International System of Units (SI) pertaining to the value. 3. The original unit symbol, from which it was normalized. This information is kept to be able to reverse this action. - ## Parameter ### Value properties @@ -29,8 +26,6 @@ The names (comma-separated) of columns containing numeric values interpreted as - Datatype: `string` - Default Value: `None` - - ### Unit property The names (comma-separated) of dedicated columns containing the unit symbol for the pertaining value in the value column (the positions in this list have to align with the pertaining value columns). Either this param or 'static unit' has to be set. @@ -39,8 +34,6 @@ The names (comma-separated) of dedicated columns containing the unit symbol for - Datatype: `string` - Default Value: `None` - - ### Static units Unit symbols (comma-separated) defining the unit for all values in the pertaining value column. If set, the 'unitProperty' param will be ignored and all values of the value column have to be numbers without unit symbols (the positions in this list have to align with the pertaining value columns). 
@@ -49,8 +42,6 @@ Unit symbols (comma-separated) defining the unit for all values in the pertainin - Datatype: `string` - Default Value: `None` - - ### Target units Unit symbols (comma-separated) defining the target unit to which the value column will be converted (Note: Make sure the input unit can be converted to the target unit). By default the pertaining SI-base unit will be used as normalization unit (the positions in this list have to align with the pertaining value columns) @@ -59,8 +50,6 @@ Unit symbols (comma-separated) defining the target unit to which the value colum - Datatype: `string` - Default Value: `None` - - ### Suppress errors If true, will ignore any parsing or value conversion error and return an empty result (might happen because of unknown unit symbols or non-numbers as values). Beware, the value will be lost completely! @@ -69,8 +58,6 @@ If true, will ignore any parsing or value conversion error and return an empty r - Datatype: `boolean` - Default Value: `false` - - ### Configuration file path An absolute file path for a unit CSV configuration file (for syntax see 'configuration' param). If set, the 'configuration' param will be ignored. @@ -79,8 +66,6 @@ An absolute file path for a unit CSV configuration file (for syntax see 'configu - Datatype: `resource` - Default Value: `None` - - ### Configuration While all SI units and decimal prefixes are supported by default, custom or obsolete units have to be added via this configuration. NOTE: when constructing formulae depending on other units defined in the configuration, make sure to order them dependently. ALSO: Rational numbers are not supported by the UCUM syntax, express them as a fraction (see 'grain' example below). @@ -88,6 +73,7 @@ While all SI units and decimal prefixes are supported by default, custom or obso - ID: `configuration` - Datatype: `multiline string` - Default Value: + ``` text # Example configuration, don't forget to remove the '#' in front of each row. 
@@ -106,10 +92,6 @@ While all SI units and decimal prefixes are supported by default, custom or obso ``` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/validateXsdOperator.md b/docs/build/reference/customtask/validateXsdOperator.md index 923bc126a..0de908b80 100644 --- a/docs/build/reference/customtask/validateXsdOperator.md +++ b/docs/build/reference/customtask/validateXsdOperator.md @@ -8,11 +8,8 @@ tags: # Validate XML - - Validates an XML dataset against a provided XML schema (XSD) file. Any errors are written to the output. Can be used in conjunction with the `Cancel Workflow` operator in order to stop the workflow if errors have been found. - ## Parameter ### File @@ -23,10 +20,6 @@ The XSD file to be used for validating the XML. - Datatype: `resource` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/customtask/xsltOperator.md b/docs/build/reference/customtask/xsltOperator.md index 7d0146c0f..69521813e 100644 --- a/docs/build/reference/customtask/xsltOperator.md +++ b/docs/build/reference/customtask/xsltOperator.md @@ -8,8 +8,6 @@ tags: # XSLT - - ## Description of the plugin The plugin `xsltOperator` is a custom task which can be used in a workflow in order to transform a given **XML file** using an **XSL transformation** from an XSLT file. The filename extension of such a XSL transformation is, accordingly, `.xslt`. @@ -19,19 +17,20 @@ The **output** of the XML transformation is saved as an output file resource. In In essence and from a technical point of view, the `xsltOperator` is simply a wrapper around the XSLT processor provided by [Saxonica](https://www.saxonica.com/products/products.xml). If you are well-versed in the XSL ecosystem, this is everything you need to know. 
If not, the remaining of the documentation provides some amount of information and detail on the parts of XSL and XSLT which are relevant for our purposes. + ## Description of XSL and XSLT ### The XSL ecosystem The acronym **XSL** stands for "eXtensible Stylesheet Language". XSL is not a single technology or specification, but a _family of languages_ for processing (transforming) and rendering (presenting) XML documents. It consists of three parts: - 1. XSLT: XSL Transformations - 2. XPath: XML Path Language - 3. XSL-FO: XSL Formatting Objects + 1. XSLT: XSL Transformations + 2. XPath: XML Path Language + 3. XSL-FO: XSL Formatting Objects In a nutshell, this is simply the separation of concerns between "processing" XML and "rendering" the results. -The most relevant of these parts for us, is **XSLT**. XSLT is a language for *transforming* or *processing* XML documents. Originally (around 1999), XSLT was designed for _styling_ XML documents, which is still seen in the nomenclature, e.g. in the term `` or in the acronyms "XSL" and "XSLT" themselves. But other than just _styling XML markup_, XSLT 2.0 (and beyond) is a Turing-complete language, which is used for **transforming XML _data_**. The modern perspective and understanding is therefore not on "XML as a markup language for documents which are presented to a web browser, or converted to a form suitable for printing, such as PDF or PostScript", but, in general terms, on "XML as a means to represent (highly-structured) data, which can be arbitrarily transformed by XSLT". +The most relevant of these parts for us, is **XSLT**. XSLT is a language for _transforming_ or _processing_ XML documents. Originally (around 1999), XSLT was designed for _styling_ XML documents, which is still seen in the nomenclature, e.g. in the term `` or in the acronyms "XSL" and "XSLT" themselves. But other than just _styling XML markup_, XSLT 2.0 (and beyond) is a Turing-complete language, which is used for **transforming XML _data_**. 
The modern perspective and understanding is therefore not on "XML as a markup language for documents which are presented to a web browser, or converted to a form suitable for printing, such as PDF or PostScript", but, in general terms, on "XML as a means to represent (highly-structured) data, which can be arbitrarily transformed by XSLT". _If_ the aspect of formatting semantics is relevant to us, then we'd need to consider and describe the XSL Formatting Objects (**XSL-FO**) vocabulary. This is beyond the scope of this document. Our focus is on the transformation of XML data via XSL transformations. @@ -52,11 +51,13 @@ The **XSL transform** turns the so-called **source tree** into a **result tree** A minimal example of the (1) XML input data, (2) a corresponding XSL transformation and the (3) generated output is the following: **XML data** (`.xml` file): + ```xml 1984 ``` **XSL stylesheet** (`.xslt` file): + ```xml @@ -66,11 +67,13 @@ A minimal example of the (1) XML input data, (2) a corresponding XSL transformat ``` **Output:** (`.html` file) + ```html

1984

``` In this example: + 1. The **XML** holds the input information (`1984`). 2. The **XSL stylesheet** specifies how that information should be formatted (it takes the title and places it inside HTML). 3. The **XSL transformation** processes both files to create the **final HTML result** (`

1984

`). @@ -80,11 +83,13 @@ In this example: A slightly more complex example is the following: **XML data** (`.xml` file): + ```xml Alice30 ``` **XSL stylesheet** (`.xslt` file): + ```xml @@ -94,6 +99,7 @@ A slightly more complex example is the following: ``` **Output:** (`.html` file) + ```html

Alice is 30 years old.

``` @@ -105,6 +111,7 @@ A slightly more complex example is the following: A rather straightforward but specific example could be the following _conversion from XML data to **RDF**_: **XML**: + ```xml Alice @@ -113,6 +120,7 @@ A rather straightforward but specific example could be the following _conversion ``` **XSL stylesheet**: + ```xml @@ -128,6 +136,7 @@ A rather straightforward but specific example could be the following _conversion ``` **Result (RDF/XML)**: + ```xml @@ -139,17 +148,20 @@ A rather straightforward but specific example could be the following _conversion ``` In this example: + 1. The **XSLT processor** matches the `` element in the input XML. 2. It **creates** an `` root element with the required namespaces. 3. It **constructs** a `` resource, using the `id` attribute as the subject URI. 4. It **writes** a `` element with the value from ``. 5. It **adds** a `` element with a `mailto:` URI built from ``. 6. It **outputs** the final RDF/XML document representing the RDF triples. + ##### JSON style The same straightforward example of an XML to RDF conversion, but using the `JSON-LD` style for the result: **XSL stylesheet** + ```xml @@ -165,6 +177,7 @@ The same straightforward example of an XML to RDF conversion, but using the `JSO ``` **Result (JSON-LD style)**: + ```json { "@id": "#p1", @@ -205,6 +218,7 @@ More conceptually, other use cases of XSLT include: - **Content publishing** — generating multiple output formats (HTML, PDF, EPUB) from a single XML source. - **Configuration and code generation** — producing scripts, configuration files, or documentation from XML metadata. - **Visualization** — creating SVG charts, diagrams, or interactive web elements from structured XML data. + ## How does XSL relate to RDF? **XSL**, specifically **XSLT**, relates to **RDF** by providing a way to **transform XML data into RDF serializations** (like RDF/XML or JSON-LD). 
It acts as a bridge between structured XML sources and Semantic Web representations, enabling automated RDF generation from existing XML-based datasets. @@ -218,11 +232,10 @@ To import XML into a **Knowledge Graph**, you typically: 3. **Validate the RDF** — check it against the ontology or SHACL shapes. 4. **Load the RDF** — import it into a **triple store** or **graph database** (e.g., Fuseki, GraphDB, Neo4j). -Whereas this process *does* the job of importing XML into a knowledge graph, using a **data integration** solution based on **knowledge graphs**, such as [eccenca Corporate Memory](https://eccenca.com/products/enterprise-knowledge-graph-platform-corporate-memory) (CMEM), is a *much better fit*. For an example on this, see the [tutorial on lifting data from an XML source](https://documentation.eccenca.com/latest/build/lift-data-from-json-and-xml-sources/), and notice how each of the steps (mapping, transforming, validating, loading) is realized. In such an improved setting, notice how the `xsltOperator` plugin is _**not** used for transforming XML into RDF_, but only for _transforming the data you want to import and bring it to the XML format_. The second step of the list (_transform the XML into RDF_) is taken care of by CMEM itself. The usage of XSLT is, therefore, limited to what is required by your input data and data processing requirements, not by the technicalities behind the semantic data integration. +Whereas this process _does_ the job of importing XML into a knowledge graph, using a **data integration** solution based on **knowledge graphs**, such as [eccenca Corporate Memory](https://eccenca.com/products/enterprise-knowledge-graph-platform-corporate-memory) (CMEM), is a _much better fit_. For an example on this, see the [tutorial on lifting data from an XML source](https://documentation.eccenca.com/latest/build/lift-data-from-json-and-xml-sources/), and notice how each of the steps (mapping, transforming, validating, loading) is realized. 
In such an improved setting, notice how the `xsltOperator` plugin is _**not** used for transforming XML into RDF_, but only for _transforming the data you want to import and bring it to the XML format_. The second step of the list (_transform the XML into RDF_) is taken care of by CMEM itself. The usage of XSLT is, therefore, limited to what is required by your input data and data processing requirements, not by the technicalities behind the semantic data integration. Notice as well that the source of XML data does not need to be an XML _file_. An alternative could be a Web API providing XML instead of JSON responses. See the [tutorial on extracting data from a Web API](https://documentation.eccenca.com/latest/build/extracting-data-from-a-web-api/), and use an **XML parser** and **XML Dataset** instead of the JSON variants described in the tutorial. Otherwise, the process is the same. - ## Parameter ### File @@ -233,10 +246,6 @@ The XSLT file to be used for transforming XML. - Datatype: `resource` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/dataset/Hive.md b/docs/build/reference/dataset/Hive.md index eefe10836..367bef107 100644 --- a/docs/build/reference/dataset/Hive.md +++ b/docs/build/reference/dataset/Hive.md @@ -8,11 +8,8 @@ tags: # Hive database - - Read from or write to an embedded Apache Hive endpoint. - ## Parameter ### Schema @@ -23,8 +20,6 @@ Name of the hive schema or namespace. - Datatype: `string` - Default Value: `None` - - ### Table Name of the hive table. @@ -33,8 +28,6 @@ Name of the hive table. - Datatype: `string` - Default Value: `None` - - ### Query Optional query for projection and selection (e.g. " SELECT * FROM table WHERE x = true". @@ -43,8 +36,6 @@ Optional query for projection and selection (e.g. " SELECT * FROM table WHERE x - Datatype: `string` - Default Value: `None` - - ### Uri pattern A pattern used to construct the entity URI. 
If not provided the prefix + the line number is used. An example of such a pattern is 'urn:zyx:{id}' where *id* is a name of a property. @@ -53,8 +44,6 @@ A pattern used to construct the entity URI. If not provided the prefix + the lin - Datatype: `string` - Default Value: `None` - - ### Properties Comma-separated list of URL-encoded properties. If not provided, the list of properties is read from the first line. @@ -63,8 +52,6 @@ Comma-separated list of URL-encoded properties. If not provided, the list of pro - Datatype: `string` - Default Value: `None` - - ### Charset The source internal encoding, e.g., UTF8, ISO-8859-1 @@ -73,10 +60,6 @@ The source internal encoding, e.g., UTF8, ISO-8859-1 - Datatype: `string` - Default Value: `UTF-8` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/dataset/Jdbc.md b/docs/build/reference/dataset/Jdbc.md index ac70d59e6..b85563bc6 100644 --- a/docs/build/reference/dataset/Jdbc.md +++ b/docs/build/reference/dataset/Jdbc.md @@ -8,8 +8,6 @@ tags: # Remote SQL endpoint - - ## General usage The JDBC dataset supports connections to Hive, Microsoft SQL Server, MySQL, MariaDB, SnowFlake, Oracle Database, DB2 and PostgreSQL databases. @@ -31,6 +29,7 @@ Most of the dataset parameters are passed directly to the driver. Please make sure that you use the correct syntax for each DBMS, otherwise you may get unintuitive errors. 
Here are templates for supported database systems: + ``` oracle (external driver needed): jdbc:oracle:thin:@{host}[:{port}]/{database} @@ -81,14 +80,14 @@ For some DBMS and specific JDBC dataset configurations we support these optimize Supported DBMS: - MySQL and MariaDB (full support for versions 8.0.19+ and 10.4+, resp.): - - if older DBMS versions are used some dataset options like 'groupBy' might not be supported but equivalent queries will - - the same is true when older driver jars then the one provided by eccenca are used - - both use the MariaDB JDBC driver - - uses `LOAD DATA LOCAL INFILE` internally - - only applies when appending data to an existing table and having `Force Spark Execution` disabled - - Both the server parameter `local_infile` and the client parameter `allowLoadLocalInfile` must be enabled, e.g. by adding `allowLoadLocalInfile=true` to the JDBC URL. + - if older DBMS versions are used some dataset options like 'groupBy' might not be supported but equivalent queries will + - the same is true when older driver jars then the one provided by eccenca are used + - both use the MariaDB JDBC driver + - uses `LOAD DATA LOCAL INFILE` internally + - only applies when appending data to an existing table and having `Force Spark Execution` disabled + - Both the server parameter `local_infile` and the client parameter `allowLoadLocalInfile` must be enabled, e.g. by adding `allowLoadLocalInfile=true` to the JDBC URL. For MySQL starting with version 8 the `local_infile` parameter is by default disabled! - - If during writing to a MySQL/MariaDB a `[…] You have an error in your SQL syntax […]` error is encountered make sure ANSIquotes are used. + - If during writing to a MySQL/MariaDB a `[…] You have an error in your SQL syntax […]` error is encountered make sure ANSIquotes are used. 
`sql_mode=ANSI_QUOTES` can be set via a URL parameter to the JDBC connection string like: ```sh @@ -132,7 +131,7 @@ spark.sql.options { In general it will not work to upgrade a JDBC driver by providing an external driver for a database that is already packaged with eccenca Dataintegration. -The driver delivered with eccenca Dataintegration will be prefered. Driver names (configured via e.g. `spark.sql.options.jdbc.drivers = "mssql"`) will be ignored if JDBC URLs starting with, in this example `jdbc:mssql...` , are already supported in the dataset. +The driver delivered with eccenca Dataintegration will be prefered. Driver names (configured via e.g. `spark.sql.options.jdbc.drivers = "mssql"`) will be ignored if JDBC URLs starting with, in this example `jdbc:mssql...` , are already supported in the dataset. _Recommended DBMS versions_ @@ -145,7 +144,6 @@ _Recommended DBMS versions_ These limitations are the same for JDBC drivers that are older than the fully supported databases. Queries can achieve a similar outcome if `groupBy` is not supported. - ## Parameter ### JDBC Driver Connection URL @@ -156,8 +154,6 @@ JDBC URL, must contain the database as parameter, i.g. with ;database=DBNAME or - Datatype: `string` - Default Value: `None` - - ### Table Table name. Can be empty if the read-strategy is not set to read the full table. If non-empty it has to contain at least an existing table. @@ -166,8 +162,6 @@ Table name. Can be empty if the read-strategy is not set to read the full table. - Datatype: `string` - Default Value: `None` - - ### Source query Source query (e.g. 'SELECT TOP 10 * FROM table WHERE x = true'. Warning: Uses Driver (mySql, HiveQL, MSSql, Postgres) specific syntax. Can be left empty when full tables are loaded. Note: Even if columns with spaces/special characters are named in the query, they need to be referred to URL-encoded in subsequent transformations. @@ -176,8 +170,6 @@ Source query (e.g. 'SELECT TOP 10 * FROM table WHERE x = true'. 
Warning: Uses Dr - Datatype: `code-sql` - Default Value: `None` - - ### Group by Comma separated list of attributes appearing in the outer SELECT clause that should be grouped by. The attributes are matched case-insensitive. All other attributes will be grouped via an aggregation function that depends on the supported DBMS, e.g. (JSON) array aggregation. @@ -186,8 +178,6 @@ Comma separated list of attributes appearing in the outer SELECT clause that sho - Datatype: `string` - Default Value: `None` - - ### Order by Optional column to sort the result set. @@ -196,8 +186,6 @@ Optional column to sort the result set. - Datatype: `string` - Default Value: `None` - - ### Limit Optional limit of returned records. This limit should be pushed to the source. No value implies that no limit will be applied. @@ -206,8 +194,6 @@ Optional limit of returned records. This limit should be pushed to the source. N - Datatype: `option[int]` - Default Value: `10` - - ### Query strategy The strategy decides how the source system is queried. @@ -216,8 +202,6 @@ The strategy decides how the source system is queried. - Datatype: `enumeration` - Default Value: `access-complete-table` - - ### Write strategy If this dataset is written to, it can be selected if data is overwritten or appended.' @@ -226,8 +210,6 @@ If this dataset is written to, it can be selected if data is overwritten or appe - Datatype: `enumeration` - Default Value: `default` - - ### Multiple values strategy How multiple values per entity property are written. @@ -236,8 +218,6 @@ How multiple values per entity property are written. - Datatype: `enumeration` - Default Value: `concatenateValuesStrategy` - - ### Clear table before workflow execution If set to true this will clear the specified table before executing a workflow that writes to it. @@ -246,8 +226,6 @@ If set to true this will clear the specified table before executing a workflow t - Datatype: `boolean` - Default Value: `false` - - ### User Username. 
Must be empty in some cases e.g. if secret key and client id are used. If non-empty this will also overwrite any value set in the JDBC URL string. @@ -256,8 +234,6 @@ Username. Must be empty in some cases e.g. if secret key and client id are used. - Datatype: `string` - Default Value: `None` - - ### Password Password. Can be empty in some cases e.g. secret key and client id are used or if it is just an empty string. The password must be set here and cannot be set in the JDBC URL connection string. @@ -266,8 +242,6 @@ Password. Can be empty in some cases e.g. secret key and client id are used or i - Datatype: `password` - Default Value: `None` - - ### Restriction An SQL WHERE clause to filter the records to be retrieved. @@ -276,10 +250,6 @@ An SQL WHERE clause to filter the records to be retrieved. - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter ### Token endpoint URL (Azure Active Directory) @@ -290,18 +260,14 @@ URL for retrieving tokens, when using MS SQL Active Directory token based authen - Datatype: `string` - Default Value: `None` - - ### Service principal name (Azure Active Directory) -Service Principal Name identifying the resource. Usually a static URL like https://database.windows.net. +Service Principal Name identifying the resource. Usually a static URL like . - ID: `spnName` - Datatype: `string` - Default Value: `None` - - ### Client id (Azure Active Directory) Client id or application id. Client id used for MS SQL token based authentication. String seperated by - char. @@ -310,8 +276,6 @@ Client id or application id. Client id used for MS SQL token based authenticatio - Datatype: `string` - Default Value: `None` - - ### Client secret (Azure Active Directory) Client secret. Client secret used for MS SQL token based authentication. Can be generated in Azure AD admin center. @@ -320,8 +284,6 @@ Client secret. Client secret used for MS SQL token based authentication. 
Can be - Datatype: `password` - Default Value: `None` - - ### Retries Optional number of retries per query @@ -330,8 +292,6 @@ Optional number of retries per query - Datatype: `int` - Default Value: `0` - - ### Pause Optional pause between queries in ms. @@ -340,8 +300,6 @@ Optional pause between queries in ms. - Datatype: `int` - Default Value: `2000` - - ### Charset The source internal encoding, e.g., UTF-8, ISO-8859-1 @@ -350,8 +308,6 @@ The source internal encoding, e.g., UTF-8, ISO-8859-1 - Datatype: `string` - Default Value: `UTF-8` - - ### Force spark execution If set to true, Spark will be used for querying the database, even if the local execution manager is configured. @@ -359,6 +315,3 @@ If set to true, Spark will be used for querying the database, even if the local - ID: `forceSparkExecution` - Datatype: `boolean` - Default Value: `false` - - - diff --git a/docs/build/reference/dataset/LocalInternalDataset.md b/docs/build/reference/dataset/LocalInternalDataset.md index 4bd414514..3492705e9 100644 --- a/docs/build/reference/dataset/LocalInternalDataset.md +++ b/docs/build/reference/dataset/LocalInternalDataset.md @@ -8,15 +8,12 @@ tags: # Internal dataset (single graph) - - Dataset for storing entities between workflow steps. This variant does use the same graph for all internal datasets in a workflow. The underlying dataset type can be configured using the `dataset.internal.*` configuration parameters. - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/dataset/SnowflakeJdbc.md b/docs/build/reference/dataset/SnowflakeJdbc.md index 95ea8335f..1fe8a1f74 100644 --- a/docs/build/reference/dataset/SnowflakeJdbc.md +++ b/docs/build/reference/dataset/SnowflakeJdbc.md @@ -8,8 +8,6 @@ tags: # Snowflake SQL endpoint - - This dataset supports connections to the Snowflake JDBC endpoint. 
## Account URL hostname @@ -47,16 +45,12 @@ The names of the written tables are generated as follows: Special characters are removed and the name is truncated to a maximum of 128 characters. - For each object mapping, the table name is generated from its type. - ## Parameter ### Connection Connection parameters - - - #### Account URL hostname The hostname which is used for the connection. Usually, this is something like '-.snowflakecomputing.com' @@ -65,7 +59,6 @@ The hostname which is used for the connection. Usually, this is something like ' - Datatype: `string` - Default Value: `-.snowflakecomputing.com` - #### Port HTTP port @@ -74,7 +67,6 @@ HTTP port - Datatype: `int` - Default Value: `443` - #### User Username @@ -83,7 +75,6 @@ Username - Datatype: `string` - Default Value: `None` - #### Password Password for basic authentication. Leave empty if key-pair authentication should be used. @@ -92,7 +83,6 @@ Password for basic authentication. Leave empty if key-pair authentication should - Datatype: `password` - Default Value: `None` - #### Private key The private key for the specified user. Leave empty if basic password authentication should be used. @@ -101,7 +91,6 @@ The private key for the specified user. Leave empty if basic password authentica - Datatype: `password` - Default Value: `None` - #### Private key password Password for encrypted private keys. Can be left empty if using an unencrypted key. @@ -110,7 +99,6 @@ Password for encrypted private keys. Can be left empty if using an unencrypted k - Datatype: `password` - Default Value: `None` - #### Additional parameters Additional JDBC connection parameters. @@ -119,7 +107,6 @@ Additional JDBC connection parameters. 
- Datatype: `keyValuePairs` - Default Value: `None` - #### Warehouse Warehouse @@ -128,7 +115,6 @@ Warehouse - Datatype: `string` - Default Value: `None` - #### Database Database @@ -137,7 +123,6 @@ Database - Datatype: `string` - Default Value: `None` - #### Schema Schema @@ -146,7 +131,6 @@ Schema - Datatype: `string` - Default Value: `None` - #### Table Table name. Can be empty if the read-strategy is not set to read the full table. @@ -155,15 +139,10 @@ Table name. Can be empty if the read-strategy is not set to read the full table. - Datatype: `string` - Default Value: `None` - - ### Read Parameters related to reading from the database. - - - #### Source query Source query (e.g. 'SELECT TOP 10 * FROM table WHERE x = true'. Can be left empty when full tables are loaded. Note: Even if columns with spaces/special characters are named in the query, they need to be referred to URL-encoded in subsequent transformations. @@ -172,7 +151,6 @@ Source query (e.g. 'SELECT TOP 10 * FROM table WHERE x = true'. Can be left empt - Datatype: `code-sql` - Default Value: `None` - #### Group by Comma separated list of attributes appearing in the outer SELECT clause that should be grouped by. The attributes are matched case-insensitive. All other attributes will be grouped via an aggregation function that depends on the supported DBMS, e.g. (JSON) array aggregation. @@ -181,7 +159,6 @@ Comma separated list of attributes appearing in the outer SELECT clause that sho - Datatype: `string` - Default Value: `None` - #### Order by Optional column to sort the result set. @@ -190,7 +167,6 @@ Optional column to sort the result set. - Datatype: `string` - Default Value: `None` - #### Limit Optional limit of returned records. This limit should be pushed to the source. No value implies that no limit will be applied. @@ -199,7 +175,6 @@ Optional limit of returned records. This limit should be pushed to the source. 
N - Datatype: `option[int]` - Default Value: `10` - #### Query strategy The strategy decides how the source system is queried. @@ -208,7 +183,6 @@ The strategy decides how the source system is queried. - Datatype: `enumeration` - Default Value: `access-complete-table` - #### Restriction An SQL WHERE clause to filter the records to be retrieved. @@ -217,15 +191,10 @@ An SQL WHERE clause to filter the records to be retrieved. - Datatype: `string` - Default Value: `None` - - ### Write Parameters related to writing to the database. - - - #### Write strategy If this dataset is written to, it can be selected if data is overwritten or appended.' @@ -234,7 +203,6 @@ If this dataset is written to, it can be selected if data is overwritten or appe - Datatype: `enumeration` - Default Value: `default` - #### Multiple values strategy How multiple values per entity property are written. @@ -243,19 +211,12 @@ How multiple values per entity property are written. - Datatype: `enumeration` - Default Value: `concatenateValuesStrategy` - - - - ## Advanced Parameter ### Query execution Query execution parameters. - - - #### Retries Optional number of retries per query @@ -264,7 +225,6 @@ Optional number of retries per query - Datatype: `int` - Default Value: `0` - #### Pause Optional pause between queries in ms. @@ -272,6 +232,3 @@ Optional pause between queries in ms. - ID: `queryExecution.pause` - Datatype: `int` - Default Value: `2000` - - - diff --git a/docs/build/reference/dataset/alignment.md b/docs/build/reference/dataset/alignment.md index 51ea7daac..4511d448e 100644 --- a/docs/build/reference/dataset/alignment.md +++ b/docs/build/reference/dataset/alignment.md @@ -8,10 +8,7 @@ tags: # Alignment - - -Writes the alignment format specified at http://alignapi.gforge.inria.fr/format.html. - +Writes the alignment format specified at . ## Parameter @@ -23,10 +20,6 @@ The alignment file. 
- Datatype: `resource` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/dataset/avro.md b/docs/build/reference/dataset/avro.md index ba4ce3418..762aae031 100644 --- a/docs/build/reference/dataset/avro.md +++ b/docs/build/reference/dataset/avro.md @@ -8,10 +8,7 @@ tags: # Avro - - -Read from or write to an Apache Avro file. - +Read from or write to an Apache Avro file. ## Parameter @@ -23,8 +20,6 @@ Path (e.g. relative like `path/filename.avro` or absolute `hdfs:///path/filename - Datatype: `resource` - Default Value: `None` - - ### Uri pattern A pattern used to construct the entity URI. If not provided the prefix + the line number is used. An example of such a pattern is `urn:zyx:{id}` where `*id*` is a name of a property. @@ -33,8 +28,6 @@ A pattern used to construct the entity URI. If not provided the prefix + the lin - Datatype: `string` - Default Value: `None` - - ### Properties Comma-separated list of URL-encoded properties. If not provided, the list of properties is read from the first line. @@ -43,8 +36,6 @@ Comma-separated list of URL-encoded properties. If not provided, the list of pro - Datatype: `string` - Default Value: `None` - - ### Charset The file encoding, e.g., UTF8, ISO-8859-1 @@ -53,10 +44,6 @@ The file encoding, e.g., UTF8, ISO-8859-1 - Datatype: `string` - Default Value: `UTF-8` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/dataset/binaryFile.md b/docs/build/reference/dataset/binaryFile.md index 30b203f66..553f4065d 100644 --- a/docs/build/reference/dataset/binaryFile.md +++ b/docs/build/reference/dataset/binaryFile.md @@ -8,13 +8,12 @@ tags: # Binary file - - Reads and writes binary files. A typical use-case for this dataset is to process PDF documents or images using workflow operators that accept or output files. 
If an operator reads from this dataset that does not support files directly (such as transformation or linking tasks), it will only receive the file metadata, which includes the file path. ## ZIP files This dataset can be used to compress/decompress ZIP files. If a ZIP file is configured, the behaviour is as follows: + - Writing a ZIP file to this dataset will overwrite the configured ZIP file. - Writing one or many non-ZIP files will overwrite the dataset file with a ZIP that contains all written files. - When reading files, the dataset will return all files inside the ZIP that match the configured regex. If the regex is empty, the ZIP file itself will be returned. @@ -28,7 +27,6 @@ Same for the `replacable output` flag, which will return the file content as a r The generic MIME type for files of this dataset is `application/octet-stream`. - ## Parameter ### File @@ -39,8 +37,6 @@ The file to read or write. - Datatype: `resource` - Default Value: `None` - - ### ZIP file regex If the file is a ZIP file, read files are filtered via this regex. If empty, the zip itself will be returned to readers. @@ -49,10 +45,6 @@ If the file is a ZIP file, read files are filtered via this regex. If empty, the - Datatype: `string` - Default Value: `.*` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/dataset/csv.md b/docs/build/reference/dataset/csv.md index 7c427b752..5e5d558d1 100644 --- a/docs/build/reference/dataset/csv.md +++ b/docs/build/reference/dataset/csv.md @@ -8,11 +8,8 @@ tags: # CSV - - Read from or write to an CSV file. - ## Parameter ### File @@ -23,8 +20,6 @@ The CSV file. This may also be a zip archive of multiple CSV files that share th - Datatype: `resource` - Default Value: `None` - - ### Properties Comma-separated list of properties. If not provided, the list of properties is read from the first line. Properties that are no valid (relative or absolute) URIs will be encoded. 
@@ -33,8 +28,6 @@ Comma-separated list of properties. If not provided, the list of properties is r - Datatype: `string` - Default Value: `None` - - ### Separator The character that is used to separate values. If not provided, defaults to ',', i.e., comma-separated values. "\t" for specifying tab-separated values, is also supported. @@ -43,8 +36,6 @@ The character that is used to separate values. If not provided, defaults to ',', - Datatype: `string` - Default Value: `,` - - ### Array separator The character that is used to separate the parts of array values. Write "\t" to specify the tab character. @@ -53,8 +44,6 @@ The character that is used to separate the parts of array values. Write "\t" to - Datatype: `string` - Default Value: `None` - - ### Quote Character used to quote values. @@ -63,8 +52,6 @@ Character used to quote values. - Datatype: `string` - Default Value: `"` - - ### Charset The file encoding, e.g., UTF-8, UTF-8-BOM, ISO-8859-1 @@ -73,8 +60,6 @@ The file encoding, e.g., UTF-8, UTF-8-BOM, ISO-8859-1 - Datatype: `string` - Default Value: `UTF-8` - - ### Regex filter A regex filter used to match rows from the CSV file. If not set all the rows are used. @@ -83,8 +68,6 @@ A regex filter used to match rows from the CSV file. If not set all the rows are - Datatype: `string` - Default Value: `None` - - ### Lines to skip The number of lines to skip in the beginning, e.g. copyright, meta information etc. @@ -93,8 +76,6 @@ The number of lines to skip in the beginning, e.g. copyright, meta information e - Datatype: `int` - Default Value: `0` - - ### Ignore bad lines If set to true then the parser will ignore lines that have syntax errors or do not have to correct number of fields according to the current config. 
@@ -103,8 +84,6 @@ If set to true then the parser will ignore lines that have syntax errors or do n - Datatype: `boolean` - Default Value: `false` - - ### Quote escape character Escape character to be used inside quotes, used to escape the quote character. It must also be used to escape itself, e.g. by doubling it, e.g. "". If left empty, it defaults to quote. @@ -113,10 +92,6 @@ Escape character to be used inside quotes, used to escape the quote character. I - Datatype: `string` - Default Value: `"` - - - - ## Advanced Parameter ### URI pattern @@ -127,8 +102,6 @@ Escape character to be used inside quotes, used to escape the quote character. I - Datatype: `string` - Default Value: `None` - - ### Max chars per column The maximum characters per column. *Warning*: System will request heap memory of that size (2 bytes per character) when reading the CSV. If there are more characters found, the parser will fail. @@ -137,8 +110,6 @@ The maximum characters per column. *Warning*: System will request heap memory of - Datatype: `int` - Default Value: `128000` - - ### ZIP file regex If the input resource is a ZIP file, files inside the file are filtered via this regex. @@ -147,8 +118,6 @@ If the input resource is a ZIP file, files inside the file are filtered via this - Datatype: `string` - Default Value: `^(?!.*[\/\\]\..*$|^\..*$).*\.csv` - - ### Delete file before workflow execution If set to true this will clear the specified file before executing a workflow that writes to it. @@ -157,15 +126,10 @@ If set to true this will clear the specified file before executing a workflow th - Datatype: `boolean` - Default Value: `false` - - -### Trim whitespace and non-printable characters. +### Trim whitespace and non-printable characters If set to true, this will trim whitespace and non-printable characters from the contents of the CSV dataset. 
- ID: `trimWhitespaceAndNonPrintableCharacters` - Datatype: `boolean` - Default Value: `false` - - - diff --git a/docs/build/reference/dataset/eccencaDataPlatform.md b/docs/build/reference/dataset/eccencaDataPlatform.md index 994d1f771..aece1073e 100644 --- a/docs/build/reference/dataset/eccencaDataPlatform.md +++ b/docs/build/reference/dataset/eccencaDataPlatform.md @@ -8,8 +8,6 @@ tags: # Knowledge Graph - - The Knowledge Graph plugin is a dataset for reading and writing RDF to a knowledge graph embedded in Corporate Memory. ## Description @@ -58,51 +56,39 @@ _semantic data management software_. The additional possibility of integrating w the SPARQL endpoint is merely a small part of the possibilities to [consume](https://documentation.eccenca.com/latest/consume/) data within CMEM. - - ## Parameter ### Graph The URI of the named graph. -- ID: `graph` -- Datatype: `graph uri` -- Default Value: `None` - - +* ID: `graph` +* Datatype: `graph uri` +* Default Value: `None` ### Clear graph before workflow execution If set to `true`, this will clear the specified graph before executing a workflow that writes to it. Note that this will always use the configured graph and ignore any overwritten values from the config port. -- ID: `clearGraphBeforeExecution` -- Datatype: `boolean` -- Default Value: `false` - - +* ID: `clearGraphBeforeExecution` +* Datatype: `boolean` +* Default Value: `false` ### SPARQL query timeout (ms) -SPARQL query timeout in milliseconds. By default, a value of zero is used. This zero value has a symbolic character: it means that the timeout of SPARQL select and update queries is configured via the properties `silk.remoteSparqlEndpoint.defaults.connection.timeout.ms and `silk.remoteSparqlEndpoint.defaults.read.timeout.ms` for the default connection and read timeouts. To overwrite these configured values, specify a (common) timeout greater than zero milliseconds. 
- -- ID: `sparqlTimeout` -- Datatype: `int` -- Default Value: `0` - +SPARQL query timeout in milliseconds. By default, a value of zero is used. This zero value has a symbolic character: it means that the timeout of SPARQL select and update queries is configured via the properties `silk.remoteSparqlEndpoint.defaults.connection.timeout.ms and`silk.remoteSparqlEndpoint.defaults.read.timeout.ms` for the default connection and read timeouts. To overwrite these configured values, specify a (common) timeout greater than zero milliseconds. +* ID: `sparqlTimeout` +* Datatype: `int` +* Default Value: `0` ### Optimized entity retrieval Optimized retrieval method to remove load from the underlying triple store. Query parallelism is limited and cheaper queries are executed against the backend. By putting the main work on DataIntegration side, the RDF backend is kept responsive. -- ID: `optimizedRetrieve` -- Datatype: `boolean` -- Default Value: `true` - - - - +* ID: `optimizedRetrieve` +* Datatype: `boolean` +* Default Value: `true` ## Advanced Parameter @@ -110,69 +96,54 @@ Optimized retrieval method to remove load from the underlying triple store. Quer The named endpoint within eccenca DataPlatform. -- ID: `endpoint` -- Datatype: `string` -- Default Value: `default` - - +* ID: `endpoint` +* Datatype: `string` +* Default Value: `default` ### Page size The number of entities to be retrieved per SPARQL query. This is the page size used when paging. 
-- ID: `pageSize` -- Datatype: `int` -- Default Value: `100000` - - +* ID: `pageSize` +* Datatype: `int` +* Default Value: `100000` ### Pause time The number of milliseconds to wait between subsequent query -- ID: `pauseTime` -- Datatype: `int` -- Default Value: `0` - - +* ID: `pauseTime` +* Datatype: `int` +* Default Value: `0` ### Retry count The number of retries if a query fails -- ID: `retryCount` -- Datatype: `int` -- Default Value: `3` - - +* ID: `retryCount` +* Datatype: `int` +* Default Value: `3` ### Retry pause The number of milliseconds to wait until a failed query is retried. -- ID: `retryPause` -- Datatype: `int` -- Default Value: `1000` - - +* ID: `retryPause` +* Datatype: `int` +* Default Value: `1000` ### Strategy The strategy for retrieving entities. There are three options: `simple` retrieves all entities using a single query; `subQuery` also uses a single query, which is optimized for Virtuoso; `parallel` executes multiple queries in parallel, one for each entity property. -- ID: `strategy` -- Datatype: `enumeration` -- Default Value: `parallel` - - +* ID: `strategy` +* Datatype: `enumeration` +* Default Value: `parallel` ### Entity list A list of entities to be retrieved. If not given, all entities will be retrieved. Multiple entities are separated by whitespace. -- ID: `entityList` -- Datatype: `multiline string` -- Default Value: `None` - - - +* ID: `entityList` +* Datatype: `multiline string` +* Default Value: `None` diff --git a/docs/build/reference/dataset/excel.md b/docs/build/reference/dataset/excel.md index 905b70f6a..21376adbb 100644 --- a/docs/build/reference/dataset/excel.md +++ b/docs/build/reference/dataset/excel.md @@ -8,11 +8,8 @@ tags: # Excel - - Read from or write to an Excel workbook in Open XML format (XLSX). The sheet is selected by specifying it as type in the subsequent workflow operator. - ## Parameter ### File @@ -23,8 +20,6 @@ File name inside the resources directory. 
- Datatype: `resource` - Default Value: `None` - - ### Lines to skip The number of lines to skip in the beginning when reading files. @@ -33,8 +28,6 @@ The number of lines to skip in the beginning when reading files. - Datatype: `int` - Default Value: `0` - - ### Has header If true, the first line will be read as the table header, which defines the column names. If false, the first line will be read as data. In that case, the columns need to be adressed using #A, #B, etc. @@ -43,8 +36,6 @@ If true, the first line will be read as the table header, which defines the colu - Datatype: `boolean` - Default Value: `true` - - ### Output object values Output results from object rules (URIs). @@ -53,10 +44,6 @@ Output results from object rules (URIs). - Datatype: `boolean` - Default Value: `true` - - - - ## Advanced Parameter ### Streaming @@ -66,6 +53,3 @@ Streaming enables reading and writing large Excels files. Warning: Be careful to - ID: `streaming` - Datatype: `boolean` - Default Value: `true` - - - diff --git a/docs/build/reference/dataset/file.md b/docs/build/reference/dataset/file.md index 7150a1e97..9cb33cc7a 100644 --- a/docs/build/reference/dataset/file.md +++ b/docs/build/reference/dataset/file.md @@ -8,11 +8,8 @@ tags: # RDF file - - Dataset which retrieves and writes all entities from/to an RDF file. For reading, the dataset is loaded in-memory and thus the size is restricted by the available memory. Large datasets should be loaded into an external RDF store and retrieved using the SPARQL dataset instead. - ## Parameter ### File @@ -23,8 +20,6 @@ The RDF file. This may also be a zip archive of multiple RDF files. - Datatype: `resource` - Default Value: `None` - - ### Format Optional RDF format. If left empty, it will be auto-detected based on the file extension. N-Triples is the only format that can be written, while other formats can only be read. @@ -33,8 +28,6 @@ Optional RDF format. 
If left empty, it will be auto-detected based on the file e - Datatype: `string` - Default Value: `None` - - ### Graph The graph name to be read. If not provided, the default graph will be used. Must be provided if the format is N-Quads. @@ -43,10 +36,6 @@ The graph name to be read. If not provided, the default graph will be used. Must - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter ### Entity list @@ -57,8 +46,6 @@ A list of entities to be retrieved. If not given, all entities will be retrieved - Datatype: `multiline string` - Default Value: `None` - - ### ZIP file regex If the input resource is a ZIP file, files inside the file are filtered via this regex. @@ -66,6 +53,3 @@ If the input resource is a ZIP file, files inside the file are filtered via this - ID: `zipFileRegex` - Datatype: `string` - Default Value: `.*` - - - diff --git a/docs/build/reference/dataset/googlespreadsheet.md b/docs/build/reference/dataset/googlespreadsheet.md index f64cdbd63..caa172a35 100644 --- a/docs/build/reference/dataset/googlespreadsheet.md +++ b/docs/build/reference/dataset/googlespreadsheet.md @@ -8,9 +8,6 @@ tags: # Excel (Google Drive) - - - The dataset needs the document id of a "share via url" sheet on Google Drive as input. It will automatically correct the URL and add the "export as xlsx" option to a new URL that will be used to download an Excel Spreadsheet. @@ -24,7 +21,6 @@ A file based cache is created to avoid CAPTCHAs. During the caching and validati access occurs with random wait times between 1 and 5 seconds. The cache is invalidated after 5 minutes by default. - ## Parameter ### URL @@ -35,8 +31,6 @@ Link to the document ('share with anyone having a link' must be enabled, URL par - Datatype: `string` - Default Value: `None` - - ### Lines to skip The number of lines to skip in the beginning when reading files. @@ -45,10 +39,6 @@ The number of lines to skip in the beginning when reading files. 
- Datatype: `int` - Default Value: `0` - - - - ## Advanced Parameter ### Streaming @@ -59,8 +49,6 @@ Streaming enables reading and writing large Excels files. Warning: Be careful to - Datatype: `boolean` - Default Value: `true` - - ### Invalidate cache after Duration until file based cache is invalidated. @@ -68,6 +56,3 @@ Duration until file based cache is invalidated. - ID: `invalidateCacheAfter` - Datatype: `duration` - Default Value: `PT5M` - - - diff --git a/docs/build/reference/dataset/inMemory.md b/docs/build/reference/dataset/inMemory.md index c8dbd0565..6285316e3 100644 --- a/docs/build/reference/dataset/inMemory.md +++ b/docs/build/reference/dataset/inMemory.md @@ -8,11 +8,8 @@ tags: # In-memory dataset - - A Dataset that holds all data in-memory. - ## Parameter ### Clear graph before workflow execution @@ -23,10 +20,6 @@ If set to true this will clear this dataset before it is used in a workflow exec - Datatype: `boolean` - Default Value: `true` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/dataset/index.md b/docs/build/reference/dataset/index.md index 6620a922f..12daf45c0 100644 --- a/docs/build/reference/dataset/index.md +++ b/docs/build/reference/dataset/index.md @@ -14,7 +14,7 @@ Datasets are collections of data that can be read or written. | Name | Description | |-------------:|:-------------------------| - | [Alignment](alignment.md) | Writes the alignment format specified at http://alignapi.gforge.inria.fr/format.html. | + | [Alignment](alignment.md) | Writes the alignment format specified at . | | [Avro](avro.md) | Read from or write to an Apache Avro file. | | [Binary file](binaryFile.md) | Reads and writes binary files. A typical use-case for this dataset is to process PDF documents or images. | | [CSV](csv.md) | Read from or write to an CSV file. 
| diff --git a/docs/build/reference/dataset/internal.md b/docs/build/reference/dataset/internal.md index e7fa46899..561b36209 100644 --- a/docs/build/reference/dataset/internal.md +++ b/docs/build/reference/dataset/internal.md @@ -8,11 +8,8 @@ tags: # Internal dataset - - Dataset for storing entities between workflow steps. The underlying dataset type can be configured using the `dataset.internal.*` configuration parameters. - ## Parameter ### graph URI @@ -23,10 +20,6 @@ The RDF graph that is used for storing internal data - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/dataset/json.md b/docs/build/reference/dataset/json.md index 1bd227f41..4cf38405f 100644 --- a/docs/build/reference/dataset/json.md +++ b/docs/build/reference/dataset/json.md @@ -8,8 +8,6 @@ tags: # JSON - - Typically, this dataset is used to transform an JSON file to another format, e.g., to RDF. ## Reading @@ -17,6 +15,7 @@ Typically, this dataset is used to transform an JSON file to another format, e.g In addition to plain JSON files, *JSON Lines* files can also be read. For reading, the JSON dataset supports a number of special paths: + - `#id` Is a special syntax for generating an id for a selected element. It can be used in URI patterns for entities which do not provide an identifier. Examples: `http://example.org/{#id}` or `http://example.org/{/pathToEntity/#id}`. - `#text` retrieves the text of the selected node. - The backslash can be used to navigate to the parent JSON node, e.g., `\parent/key`. The name of the backslash key (here `parent`) is ignored. @@ -25,7 +24,6 @@ For reading, the JSON dataset supports a number of special paths: When writing JSON, all entities need to possess a unique URI. Writing multiple root entities with the same URI will result in multiple entries in the generated JSON. 
If multiple nested entities with the same URI are written, only the last entity with a given URI will be written. - ## Parameter ### File @@ -36,8 +34,6 @@ JSON file. This may also be a zip archive of multiple JSON files that share the - Datatype: `resource` - Default Value: `None` - - ### Template Template for writing JSON. The term {{output}} will be replaced by the written JSON. @@ -46,8 +42,6 @@ Template for writing JSON. The term {{output}} will be replaced by the written J - Datatype: `code-json` - Default Value: `{{output}}` - - ### Navigate into arrays Navigate into arrays automatically. If set to false, the `#array` path operator must be used to navigate into arrays. @@ -56,10 +50,6 @@ Navigate into arrays automatically. If set to false, the `#array` path operator - Datatype: `boolean` - Default Value: `true` - - - - ## Advanced Parameter ### Base path @@ -70,18 +60,14 @@ The path to the elements to be read, starting from the root element, e.g., '/Per - Datatype: `string` - Default Value: `None` - - ### URI pattern (deprecated) -A URI pattern, e.g., http://namespace.org/{ID}, where {path} may contain relative paths to elements +A URI pattern, e.g., , where {path} may contain relative paths to elements - ID: `uriPattern` - Datatype: `string` - Default Value: `None` - - ### Max depth Maximum depth of written JSON. This acts as a safe guard if a recursive structure is written. @@ -90,8 +76,6 @@ Maximum depth of written JSON. This acts as a safe guard if a recursive structur - Datatype: `int` - Default Value: `15` - - ### Streaming Streaming allows for reading large JSON files. If streaming is enabled, backward paths are not supported. @@ -100,8 +84,6 @@ Streaming allows for reading large JSON files. If streaming is enabled, backward - Datatype: `boolean` - Default Value: `true` - - ### ZIP file regex If the input resource is a ZIP file, files inside the file are filtered via this regex. 
@@ -109,6 +91,3 @@ If the input resource is a ZIP file, files inside the file are filtered via this - ID: `zipFileRegex` - Datatype: `string` - Default Value: `^(?!.*[\/\\]\..*$|^\..*$).*\.jsonl?$` - - - diff --git a/docs/build/reference/dataset/multiCsv.md b/docs/build/reference/dataset/multiCsv.md index 4847492b7..915e0c0f1 100644 --- a/docs/build/reference/dataset/multiCsv.md +++ b/docs/build/reference/dataset/multiCsv.md @@ -8,11 +8,8 @@ tags: # Multi CSV ZIP - - Reads from or writes to multiple CSV files from/to a single ZIP file. - ## Parameter ### File @@ -23,8 +20,6 @@ Zip file name inside the resources directory/repository. - Datatype: `resource` - Default Value: `None` - - ### Separator The character that is used to separate values. If not provided, defaults to ',', i.e., comma-separated values. "\t" for specifying tab-separated values, is also supported. @@ -33,8 +28,6 @@ The character that is used to separate values. If not provided, defaults to ',', - Datatype: `string` - Default Value: `,` - - ### Array separator The character that is used to separate the parts of array values. Write "\t" to specify the tab character. @@ -43,8 +36,6 @@ The character that is used to separate the parts of array values. Write "\t" to - Datatype: `string` - Default Value: `None` - - ### Quote Character used to quote values. @@ -53,8 +44,6 @@ Character used to quote values. - Datatype: `string` - Default Value: `"` - - ### Charset The file encoding, e.g., UTF8, ISO-8859-1 @@ -63,8 +52,6 @@ The file encoding, e.g., UTF8, ISO-8859-1 - Datatype: `string` - Default Value: `UTF-8` - - ### Lines to skip The number of lines to skip in the beginning, e.g. copyright, meta information etc. @@ -73,8 +60,6 @@ The number of lines to skip in the beginning, e.g. copyright, meta information e - Datatype: `int` - Default Value: `0` - - ### Max chars per column The maximum characters per column. If there are more characters found, the parser will fail. 
@@ -83,8 +68,6 @@ The maximum characters per column. If there are more characters found, the parse - Datatype: `int` - Default Value: `128000` - - ### Ignore bad lines If set to true then the parser will ignore lines that have syntax errors or do not have to correct number of fields according to the current config. @@ -93,8 +76,6 @@ If set to true then the parser will ignore lines that have syntax errors or do n - Datatype: `boolean` - Default Value: `false` - - ### Quote escape character Escape character to be used inside quotes, used to escape the quote character. It must also be used to escape itself, e.g. by doubling it, e.g. "". If left empty, it defaults to quote. @@ -103,8 +84,6 @@ Escape character to be used inside quotes, used to escape the quote character. I - Datatype: `string` - Default Value: `"` - - ### Append files If 'True' then files in the ZIP archive are only added or updated, all other files in the ZIP stay untouched. If 'False' then a new ZIP file will be created on every dataset write. @@ -113,10 +92,6 @@ If 'True' then files in the ZIP archive are only added or updated, all other fil - Datatype: `boolean` - Default Value: `true` - - - - ## Advanced Parameter ### ZIP file regex @@ -127,8 +102,6 @@ Filter file paths inside the ZIP file via this regex. By default sub folders or - Datatype: `string` - Default Value: `^[^/]*\.csv$` - - ### Delete file before workflow execution If set to true this will clear the specified file before executing a workflow that writes to it. @@ -137,15 +110,10 @@ If set to true this will clear the specified file before executing a workflow th - Datatype: `boolean` - Default Value: `true` - - -### Optionally trim whitespace and non-printable characters. +### Optionally trim whitespace and non-printable characters If set to true, this will trim whitespace and non-printable characters from the contents of the CSV dataset. 
- ID: `trimWhitespaceAndNonPrintableCharacters` - Datatype: `boolean` - Default Value: `false` - - - diff --git a/docs/build/reference/dataset/neo4j.md b/docs/build/reference/dataset/neo4j.md index e852ac448..dbab4b8fd 100644 --- a/docs/build/reference/dataset/neo4j.md +++ b/docs/build/reference/dataset/neo4j.md @@ -8,9 +8,6 @@ tags: # Neo4j - - - Supports reading and writing Neo4j graphs. The following sections outline how graphs are generated and read back. For more information about Neo4j, please refer to the [Neo4j documentation](https://neo4j.com/docs/). @@ -52,6 +49,7 @@ In eccenca DataIntegration, URIs are typically used to uniquely identify classes While URIs are central in RDF, Neo4j does allow arbitrary names and does not have any special support for URIs. When generating Neo4j labels, properties and relationships, URIs will be shortened according to the following rules. + - If a registered project prefix matches a URI, a name `{prefixName}_{localPart}` will be generated. For instance, `http://xmlns.com/foaf/0.1/name` will become `foaf_name`. Note that underscores (`_`) are used instead of colons (`:`) to separate the namespace and the local name. The reason is that colons are reserved in the Cypher query language and some tools don't escape properly and fail on databases that use colons in names. @@ -73,7 +71,6 @@ This is meant to help understanding and does not aim to provide a precise mappin | object property | relationship | | graph | Do not exist in Neo4j, but labels can be used to mimic graphs. | - ## Parameter ### URI @@ -84,8 +81,6 @@ The URL to the Neo4j instance - Datatype: `string` - Default Value: `bolt://localhost:7687` - - ### User The Neo4j username for basic authentication. @@ -94,8 +89,6 @@ The Neo4j username for basic authentication. - Datatype: `string` - Default Value: `neo4j` - - ### Password The Neo4j password for basic authentication. @@ -104,8 +97,6 @@ The Neo4j password for basic authentication. 
- Datatype: `password` - Default Value: `PASSWORD_PARAMETER:7vIY2uNcIiwSSo+/MNozEg==` - - ### Database Database (leave empty for default) @@ -114,8 +105,6 @@ Database (leave empty for default) - Datatype: `string` - Default Value: `None` - - ### Node label Neo4j label for all entities to be covered by this dataset. When reading, all nodes with this label will be read. When writing, this label will be added to all generated nodes. If the dataset is cleared, only nodes with this label will be deleted. @@ -124,8 +113,6 @@ Neo4j label for all entities to be covered by this dataset. When reading, all no - Datatype: `string` - Default Value: `Any` - - ### Clear before execution If set to true, all nodes with the specified label will be removed, before executing a workflow that writes to this graph. @@ -134,10 +121,6 @@ If set to true, all nodes with the specified label will be removed, before execu - Datatype: `boolean` - Default Value: `true` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/dataset/office365preadsheet.md b/docs/build/reference/dataset/office365preadsheet.md index bbfa5bdf5..8b8f49fbe 100644 --- a/docs/build/reference/dataset/office365preadsheet.md +++ b/docs/build/reference/dataset/office365preadsheet.md @@ -8,8 +8,6 @@ tags: # Excel (OneDrive, Office365) - - The dataset needs the URL of a "share via link" sheet on Office 365/OneDrive as input. It will automatically construct a direct download URL, cache the download file handle it like an XLSX file in the Excel Dataset. @@ -22,9 +20,9 @@ Onedrive links look like `https://1drv.ms/x/s!AucULvzmJ-dsdfsfgaIcyWP_XY_G4w?e=y Onedrive (based one sharepoint, for businesses) links look like `https://eccencagmbh-my.sharepoint.com/:x:/g/personal/person_eccenca_com/EdEMTEw1dclHiEZXyvy8P4YBit8wSyGsiwU5Kt__sQOZzw` The first type should always work is not recommended for this dataset. 
The second type requires to set up an application in Microsoft EntraID (formerly Azure Active Directory). -EntraID: https://docs.microsoft.com/azure/active-directory/develop/v2-overview +EntraID: Instructions and examples can be found here: -https://github.com/Azure-Samples/ms-identity-msal-java-samples/tree/main/3-java-servlet-web-app/1-Authentication/sign-in + After following the steps access to sharepoint/onedrive for business can be setup in the application.conf file for eccenca DataIntegration. @@ -48,7 +46,6 @@ A file based cache is created to avoid CAPTCHAs. During the caching and validati access occurs with random wait times between 1 and 5 seconds. The cache is invalidated after 5 minutes by default. - ## Parameter ### URL @@ -59,8 +56,6 @@ Link to the document ('share with anyone having a link' must be enabled). - Datatype: `string` - Default Value: `None` - - ### Lines to skip The number of lines to skip in the beginning when reading files. @@ -69,10 +64,6 @@ The number of lines to skip in the beginning when reading files. - Datatype: `int` - Default Value: `0` - - - - ## Advanced Parameter ### Streaming @@ -83,8 +74,6 @@ Streaming enables reading and writing large Excels files. Warning: Be careful to - Datatype: `boolean` - Default Value: `true` - - ### Invalidate cache after Duration until file based cache is invalidated. @@ -92,6 +81,3 @@ Duration until file based cache is invalidated. - ID: `invalidateCacheAfter` - Datatype: `duration` - Default Value: `PT5M` - - - diff --git a/docs/build/reference/dataset/orc.md b/docs/build/reference/dataset/orc.md index 5a3e3b378..26dd08857 100644 --- a/docs/build/reference/dataset/orc.md +++ b/docs/build/reference/dataset/orc.md @@ -8,11 +8,8 @@ tags: # ORC - - Read from or write to an Apache ORC file. - ## Parameter ### File @@ -23,8 +20,6 @@ Path (e.g. relative like 'path/filename.orc' or absolute 'hdfs:///path/filename. 
- Datatype: `resource` - Default Value: `None` - - ### Uri pattern A pattern used to construct the entity URI. If not provided the prefix + the line number is used. An example of such a pattern is 'urn:zyx:{id}' where *id* is a name of a property. @@ -33,8 +28,6 @@ A pattern used to construct the entity URI. If not provided the prefix + the lin - Datatype: `string` - Default Value: `None` - - ### Properties Comma-separated list of URL-encoded properties. If not provided, the list of properties is read from the first line. @@ -43,8 +36,6 @@ Comma-separated list of URL-encoded properties. If not provided, the list of pro - Datatype: `string` - Default Value: `None` - - ### Partition Optional specification of the attribute for output partitioning @@ -53,8 +44,6 @@ Optional specification of the attribute for output partitioning - Datatype: `string` - Default Value: `None` - - ### Compression Optional compression algorithm (e.g. snappy, zlib) @@ -63,8 +52,6 @@ Optional compression algorithm (e.g. snappy, zlib) - Datatype: `string` - Default Value: `snappy` - - ### Charset The file encoding, e.g., UTF8, ISO-8859-1 @@ -73,10 +60,6 @@ The file encoding, e.g., UTF8, ISO-8859-1 - Datatype: `string` - Default Value: `UTF-8` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/dataset/parquet.md b/docs/build/reference/dataset/parquet.md index 5944ca685..1dc31a48e 100644 --- a/docs/build/reference/dataset/parquet.md +++ b/docs/build/reference/dataset/parquet.md @@ -8,11 +8,8 @@ tags: # Parquet - - Read from or write to an Apache Parquet file. - ## Parameter ### File @@ -23,8 +20,6 @@ Path (e.g. relative like 'path/filename.orc' or absolute 'hdfs:///path/filename. - Datatype: `resource` - Default Value: `None` - - ### Uri pattern A pattern used to construct the entity URI. If not provided the prefix + the line number is used. An example of such a pattern is 'urn:zyx:{id}' where *id* is a name of a property. 
@@ -33,8 +28,6 @@ A pattern used to construct the entity URI. If not provided the prefix + the lin - Datatype: `string` - Default Value: `None` - - ### Properties Comma-separated list of URL-encoded properties. If not provided, the list of properties is read from the first line. @@ -43,8 +36,6 @@ Comma-separated list of URL-encoded properties. If not provided, the list of pro - Datatype: `string` - Default Value: `None` - - ### Partition Optional specification of the attribute for output partitioning @@ -53,8 +44,6 @@ Optional specification of the attribute for output partitioning - Datatype: `string` - Default Value: `None` - - ### Compression Optional compression algorithm (e.g. snappy, zlib) @@ -63,8 +52,6 @@ Optional compression algorithm (e.g. snappy, zlib) - Datatype: `string` - Default Value: `None` - - ### Charset The file encoding, e.g., UTF8, ISO-8859-1 @@ -73,10 +60,6 @@ The file encoding, e.g., UTF8, ISO-8859-1 - Datatype: `string` - Default Value: `UTF-8` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/dataset/sparkView.md b/docs/build/reference/dataset/sparkView.md index 4288af738..d1e1f1810 100644 --- a/docs/build/reference/dataset/sparkView.md +++ b/docs/build/reference/dataset/sparkView.md @@ -8,11 +8,8 @@ tags: # Embedded Spark SQL view - - Deprecated: Use the embedded SQL endpoint dataset instead. - ## Parameter ### View name @@ -23,8 +20,6 @@ The name of the view. This specifies the table that can be queried by another vi - Datatype: `string` - Default Value: `None` - - ### Query Optional SQL query on the selected table. Has no effect when used as an output dataset. @@ -33,8 +28,6 @@ Optional SQL query on the selected table. Has no effect when used as an output d - Datatype: `string` - Default Value: `None` - - ### Cache Optional boolean option that selects if the table should be cached by Spark or not (default = true). 
@@ -43,8 +36,6 @@ Optional boolean option that selects if the table should be cached by Spark or n - Datatype: `boolean` - Default Value: `true` - - ### Uri pattern A pattern used to construct the entity URI. If not provided the prefix + the line number is used. An example of such a pattern is 'urn:zyx:{id}' where *id* is a name of a property. @@ -53,8 +44,6 @@ A pattern used to construct the entity URI. If not provided the prefix + the lin - Datatype: `string` - Default Value: `None` - - ### Properties Comma-separated list of URL-encoded properties. If not provided, the list of properties is read from the first line. @@ -63,8 +52,6 @@ Comma-separated list of URL-encoded properties. If not provided, the list of pro - Datatype: `string` - Default Value: `None` - - ### Charset The source internal encoding, e.g., UTF8, ISO-8859-1 @@ -73,8 +60,6 @@ The source internal encoding, e.g., UTF8, ISO-8859-1 - Datatype: `string` - Default Value: `UTF-8` - - ### Array separator The character that is used to separate the parts of array values. Write "back slash t" to specify the tab character. @@ -83,8 +68,6 @@ The character that is used to separate the parts of array values. Write "back sl - Datatype: `string` - Default Value: `|` - - ### Compatibility If true, basic types will be used for types that otherwise would result in client errors. This mainly that arrays will be stored as Strings separated by the separator defined above. If the view is only for use within a SparkContext, this can be set to false. 
@@ -93,10 +76,6 @@ If true, basic types will be used for types that otherwise would result in clien - Datatype: `boolean` - Default Value: `true` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/dataset/sparqlEndpoint.md b/docs/build/reference/dataset/sparqlEndpoint.md index 76467151f..da7d57264 100644 --- a/docs/build/reference/dataset/sparqlEndpoint.md +++ b/docs/build/reference/dataset/sparqlEndpoint.md @@ -8,8 +8,6 @@ tags: # SPARQL endpoint - - The SPARQL endpoint plugin is a dataset for connecting to an existing, remote SPARQL endpoint. ## Description @@ -30,7 +28,7 @@ queries. Additionally, it can execute [updates](https://www.w3.org/TR/2013/REC-s ## Example usage A very simple example showcasing the usage of this plugin is the following idea: Use an online SPARQL Query Editor such -as https://dbpedia.org/sparql, with a simple SPARQL query like `select distinct ?Concept where {[] a ?Concept} LIMIT 10` +as , with a simple SPARQL query like `select distinct ?Concept where {[] a ?Concept} LIMIT 10` or similar. Use this plugin as a **source** dataset, and transform or transfer the SPARQL query results into a sink dataset such as a **CSV file**. A similar or related showcase example involves considering other output datasets such as an **in-memory dataset** or a **Knowledge Graph** such as the one handled by the `eccencaDataPlatform` plugin, which is @@ -46,7 +44,6 @@ information is short-lived or the dataset is small. A more durable and resilient The SPARQL dataset plugin can be used in conjunction with the **task** plugins for SPARQL `SELECT` and `CONSTRUCT` queries, i.e. the plugins `sparqlSelectOperator` and `sparqlCopyOperator`. - ## Parameter ### Endpoint URI @@ -57,8 +54,6 @@ The URI of the SPARQL endpoint, e.g. 
`http://dbpedia.org/sparql` - Datatype: `string` - Default Value: `None` - - ### Login Login required for authentication @@ -67,8 +62,6 @@ Login required for authentication - Datatype: `string` - Default Value: `None` - - ### Password Password required for authentication @@ -77,8 +70,6 @@ Password required for authentication - Datatype: `password` - Default Value: `None` - - ### Graph The URI of a named graph. If set, the SPARQL endpoint will only retrieve entities from that specific graph. @@ -87,8 +78,6 @@ The URI of a named graph. If set, the SPARQL endpoint will only retrieve entitie - Datatype: `string` - Default Value: `None` - - ### Strategy The strategy for retrieving entities. There are three options: `simple` retrieves all entities using a single query; `subQuery` also uses a single query, which is optimized for Virtuoso; `parallel` executes multiple queries in parallel, one for each entity property. @@ -97,8 +86,6 @@ The strategy for retrieving entities. There are three options: `simple` retrieve - Datatype: `enumeration` - Default Value: `parallel` - - ### Use order by Enforces the correct ordering of values, if set to `true` (default). @@ -107,8 +94,6 @@ Enforces the correct ordering of values, if set to `true` (default). - Datatype: `boolean` - Default Value: `true` - - ### Clear graph before workflow execution If set to `true`, this will clear the specified graph before executing a workflow that writes into it. @@ -117,20 +102,14 @@ If set to `true`, this will clear the specified graph before executing a workflo - Datatype: `boolean` - Default Value: `false` - - ### SPARQL query timeout (ms) -SPARQL query timeout in milliseconds. By default, a value of zero is used. 
This zero value has a symbolic character: it means that the timeout of SPARQL select and update queries is configured via the properties `silk.remoteSparqlEndpoint.defaults.connection.timeout.ms and `silk.remoteSparqlEndpoint.defaults.read.timeout.ms` for the default connection and read timeouts. To overwrite these configured values, specify a (common) timeout greater than zero milliseconds. +SPARQL query timeout in milliseconds. By default, a value of zero is used. This zero value has a symbolic character: it means that the timeout of SPARQL select and update queries is configured via the properties `silk.remoteSparqlEndpoint.defaults.connection.timeout.ms and`silk.remoteSparqlEndpoint.defaults.read.timeout.ms` for the default connection and read timeouts. To overwrite these configured values, specify a (common) timeout greater than zero milliseconds. - ID: `sparqlTimeout` - Datatype: `int` - Default Value: `0` - - - - ## Advanced Parameter ### Page size @@ -141,8 +120,6 @@ The number of entities to be retrieved per SPARQL query. This is the page size u - Datatype: `int` - Default Value: `1000` - - ### Entity list An optional list of entities to be retrieved. If not specified, all entities will be retrieved. Multiple entities need to be separated by whitespace. @@ -151,8 +128,6 @@ An optional list of entities to be retrieved. 
If not specified, all entities wil - Datatype: `multiline string` - Default Value: `None` - - ### Pause time The number of milliseconds to wait between subsequent queries @@ -161,8 +136,6 @@ The number of milliseconds to wait between subsequent queries - Datatype: `int` - Default Value: `0` - - ### Retry count The total number of retries to execute a (repeatedly) failing query @@ -171,8 +144,6 @@ The total number of retries to execute a (repeatedly) failing query - Datatype: `int` - Default Value: `3` - - ### Retry pause The number of milliseconds to wait until a previously failed query is executed again @@ -181,8 +152,6 @@ The number of milliseconds to wait until a previously failed query is executed a - Datatype: `int` - Default Value: `1000` - - ### Query parameters Additional parameters to be appended to every query, e.g. `&soft-limit=1` @@ -190,6 +159,3 @@ Additional parameters to be appended to every query, e.g. `&soft-limit=1` - ID: `queryParameters` - Datatype: `string` - Default Value: `None` - - - diff --git a/docs/build/reference/dataset/sqlEndpoint.md b/docs/build/reference/dataset/sqlEndpoint.md index d100af1cc..c35074766 100644 --- a/docs/build/reference/dataset/sqlEndpoint.md +++ b/docs/build/reference/dataset/sqlEndpoint.md @@ -8,8 +8,6 @@ tags: # Embedded SQL endpoint - - _SQL endpoint dataset parameters_ The dataset only requires that the _tableNamePrefix_ parameter is given. This will be used as the prefix for the names of the generated tables. @@ -64,7 +62,6 @@ Any JDBC or ODBC client can connect to an SQL endpoint dataset. SparkSQL uses th A detailed instruction to connect to a Hive or SparkSQL endpoint with various tools (e.g. SQuirreL, beeline, SQL Developer, ...) can be found at _[Apache HiveServer2 Clients](https://cwiki.apache.org/confluence/display/Hive/HiveServer2+Clients)_. The database client _[DBeaver](https://dbeaver.io/)_ can connect to the SQL endpoint out of the box. 
- ## Parameter ### Table name prefix @@ -75,8 +72,6 @@ Prefix of the table that will be shared. In the case of complex mappings more th - Datatype: `string` - Default Value: `None` - - ### Cache Optional boolean option that selects if the table should be cached by Spark or not (default = true). @@ -85,8 +80,6 @@ Optional boolean option that selects if the table should be cached by Spark or n - Datatype: `boolean` - Default Value: `true` - - ### Array separator The character that is used to separate the parts of array values. Write \t to specify the tab character. @@ -95,8 +88,6 @@ The character that is used to separate the parts of array values. Write \t to sp - Datatype: `string` - Default Value: `|` - - ### Compatibility If true, basic types will be used for unusual data types that otherwise may result in client errors. Try switching this on, if a client has weird error messages. (Default = true) @@ -105,8 +96,6 @@ If true, basic types will be used for unusual data types that otherwise may resu - Datatype: `boolean` - Default Value: `true` - - ### Map Mapping of column names. Similar to aliases E.g. 'c1:c2' would rename column c1 into c2. @@ -115,10 +104,6 @@ Mapping of column names. Similar to aliases E.g. 'c1:c2' would rename column c1 - Datatype: `stringmap` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/dataset/text.md b/docs/build/reference/dataset/text.md index ed034bcec..98ea4a24f 100644 --- a/docs/build/reference/dataset/text.md +++ b/docs/build/reference/dataset/text.md @@ -8,19 +8,16 @@ tags: # Text - - Reads and writes plain text files. ## Writing -All values of each entity will be written as plain text. Multiple values per entity are separated by spaces. Each entity will be written to a new line. +All values of each entity will be written as plain text. Multiple values per entity are separated by spaces. Each entity will be written to a new line. 
## Reading The entire text will be read as a single entity with a single property. Note that even if multiple entities have been written to this dataset before, those would still be read back as a single entity. The default type is `document`, the default path is `text`. Both values can be configured in the advanced section. - ## Parameter ### File @@ -31,8 +28,6 @@ The plain text file. May also be a zip archive containing multiple text files. - Datatype: `resource` - Default Value: `None` - - ### Charset The file encoding, e.g., UTF-8, UTF-8-BOM, ISO-8859-1 @@ -41,10 +36,6 @@ The file encoding, e.g., UTF-8, UTF-8-BOM, ISO-8859-1 - Datatype: `string` - Default Value: `UTF-8` - - - - ## Advanced Parameter ### Type name @@ -55,8 +46,6 @@ A type name that represents this file. - Datatype: `string` - Default Value: `document` - - ### Property The single property that holds the text. @@ -65,8 +54,6 @@ The single property that holds the text. - Datatype: `string` - Default Value: `text` - - ### ZIP file regex If the input resource is a ZIP file, files inside the file are filtered via this regex. @@ -74,6 +61,3 @@ If the input resource is a ZIP file, files inside the file are filtered via this - ID: `zipFileRegex` - Datatype: `string` - Default Value: `.*` - - - diff --git a/docs/build/reference/dataset/xml.md b/docs/build/reference/dataset/xml.md index df544b1bd..d1636cd6e 100644 --- a/docs/build/reference/dataset/xml.md +++ b/docs/build/reference/dataset/xml.md @@ -8,8 +8,6 @@ tags: # XML - - Typically, this dataset is used to transform an XML file to another format, e.g., to RDF. It can also be used to generate XML files. ## Reading @@ -55,7 +53,6 @@ Path examples: When writing XML, all entities need to possess a unique URI. Writing multiple root entities with the same URI will result in multiple entries in the generated XML. If multiple nested entities with the same URI are written, only the last entity with a given URI will be written. 
- ## Parameter ### File @@ -66,10 +63,6 @@ The XML file. This may also be a zip archive of multiple XML files that share th - Datatype: `resource` - Default Value: `None` - - - - ## Advanced Parameter ### Base path @@ -80,18 +73,14 @@ The base path when writing XML. For instance: /RootElement/Entity. Should no lon - Datatype: `string` - Default Value: `None` - - ### URI pattern -A URI pattern, e.g., http://namespace.org/{ID}, where {path} may contain relative paths to elements +A URI pattern, e.g., , where {path} may contain relative paths to elements - ID: `uriPattern` - Datatype: `string` - Default Value: `None` - - ### Output template The output template used for writing XML. Must be valid XML. The generated entity is identified through a processing instruction of the form . @@ -100,8 +89,6 @@ The output template used for writing XML. Must be valid XML. The generated entit - Datatype: `code-xml` - Default Value: `` - - ### Streaming Streaming allows for reading large XML files. @@ -110,8 +97,6 @@ Streaming allows for reading large XML files. - Datatype: `boolean` - Default Value: `true` - - ### Max depth Maximum depth of written XML. This acts as a safe guard if a recursive structure is written. @@ -120,8 +105,6 @@ Maximum depth of written XML. This acts as a safe guard if a recursive structure - Datatype: `int` - Default Value: `15` - - ### ZIP file regex If the input resource is a ZIP file, files inside the file are filtered via this regex. 
@@ -129,6 +112,3 @@ If the input resource is a ZIP file, files inside the file are filtered via this - ID: `zipFileRegex` - Datatype: `string` - Default Value: `^(?!.*[\/\\]\..*$|^\..*$).*\.xml$` - - - diff --git a/docs/build/reference/distancemeasure/PhysicalQuantitiesDistance.md b/docs/build/reference/distancemeasure/PhysicalQuantitiesDistance.md index ee37a94e7..84f11af1f 100644 --- a/docs/build/reference/distancemeasure/PhysicalQuantitiesDistance.md +++ b/docs/build/reference/distancemeasure/PhysicalQuantitiesDistance.md @@ -8,8 +8,6 @@ tags: # Compare physical quantities - - Computes the distance between two physical quantities. This "distance" is to be understood in a mathematical or abstract sense of _absolute difference_. The distance is normalized to the SI base unit of the dimension. @@ -22,6 +20,7 @@ Comparing incompatible units will yield a validation error. Time is expressed in seconds (symbol: `s`). The following alternative symbols are supported: + * `mo_s`: day*29.53059 * `mo_g`: year/12.0 * `a`: day*365.25 @@ -34,11 +33,11 @@ The following alternative symbols are supported: * `a_t`: day*365.24219 * `d`: day - ### Length Length is expressed in metres (symbol: `m`). The following alternative symbols are supported: + * `in`: c(cm*254.0) * `nmi`: m*1852.0 * `Ao`: dnm @@ -51,11 +50,11 @@ The following alternative symbols are supported: * `mi`: ((c(cm*254.0))*12.0)*5280.0 * `hd`: (c(cm*254.0))*4.0 - ### Mass Mass is expressed in kilograms (symbol: `kg`). The following alternative symbols are supported: + * `lb`: lb * `ston`: hlb*20.0 * `t`: Mg @@ -69,21 +68,20 @@ The following alternative symbols are supported: * `dr`: oz/16.0 * `lton`: (lb*112.0)*20.0 - ### Electric current Electric current is expressed in amperes (symbol: `A`). The following alternative symbols are supported: + * `Bi`: daA * `Gb`: cm·(A/m)*250.0/[one?] - ### Temperature Temperature is expressed in kelvins (symbol: `K`). 
The following alternative symbols are supported: -* `Cel`: ℃ +* `Cel`: ℃ ### Amount of substance @@ -97,6 +95,7 @@ Luminous intensity is expressed in candelas (symbol: `cd`). Area is expressed in square metres (symbol: `m²`). The following alternative symbols are supported: + * `m2`: m² * `ar`: hm² * `syd`: ((c(cm*254.0))*12.0)*3.0² @@ -105,11 +104,11 @@ The following alternative symbols are supported: * `sft`: (c(cm*254.0))*12.0² * `sin`: c(cm*254.0)² - ### Volume Volume is expressed in cubic metres (symbol: `㎥`). The following alternative symbols are supported: + * `st`: [㎥?] * `bf`: (c(cm*254.0)³)*144.0 * `cyd`: ((c(cm*254.0))*12.0)*3.0³ @@ -120,71 +119,71 @@ The following alternative symbols are supported: * `cft`: (c(cm*254.0))*12.0³ * `m3`: ㎥ - ### Energy Energy is expressed in joules (symbol: `J`). The following alternative symbols are supported: + * `cal_IT`: (J*41868.0)/10000.0 * `eV`: J*1.602176487E-19 * `cal_m`: (J*419002.0)/100000.0 * `cal`: m(J*4184.0) * `cal_th`: m(J*4184.0) - ### Angle Angle is expressed in radians (symbol: `rad`). The following alternative symbols are supported: + * `circ`: [one?]·rad*2.0 * `gon`: ([one?]·rad/180.0)*0.9 * `deg`: [one?]·rad/180.0 * `'`: ([one?]·rad/180.0)/60.0 * `''`: (([one?]·rad/180.0)/60.0)/60.0 - ### Others -- `1/m`, derived units: `Ky`: c(1/m) -- `kg/(m·s)`, derived units: `P`: g/(s·cm) -- `bit/s`, derived units: `Bd`: bit/s -- `bit`, derived units: `By`: bit*8.0 -- `Sv` -- `N` -- `Ω`, derived units: `Ohm`: Ω -- `T`, derived units: `G`: T/10000.0 -- `sr`, derived units: `sph`: [one?]·sr*4.0 -- `F` -- `C/kg`, derived units: `R`: (C/kg)*2.58E-4 -- `cd/m²`, derived units: `sb`: cd/cm², `Lmb`: cd/([one?]·cm²) -- `Pa`, derived units: `bar`: Pa*100000.0, `atm`: Pa*101325.0 -- `kg/(m·s²)`, derived units: `att`: k(g·(m/s²)*9.80665)/cm² -- `m²/s`, derived units: `St`: cm²/s -- `A/m`, derived units: `Oe`: (A/m)*250.0/[one?] 
-- `kg·m²/s²`, derived units: `erg`: cm²·g/s² -- `kg/m³`, derived units: `g%`: g/dl -- `mho` -- `V` -- `lx`, derived units: `ph`: lx/10000.0 -- `m/s²`, derived units: `Gal`: cm/s², `m/s2`: m/s² -- `m/s`, derived units: `kn`: m*1852.0/h -- `m·kg/s²`, derived units: `gf`: g·(m/s²)*9.80665, `lbf`: lb·(m/s²)*9.80665, `dyn`: cm·g/s² -- `m²/s²`, derived units: `RAD`: cm²·g/(s²·hg), `REM`: cm²·g/(s²·hg) -- `C` -- `Gy` -- `Hz` -- `H` -- `lm` -- `W` -- `Wb`, derived units: `Mx`: Wb/1.0E8 -- `Bq`, derived units: `Ci`: Bq*3.7E10 -- `S` - +* `1/m`, derived units: `Ky`: c(1/m) +* `kg/(m·s)`, derived units: `P`: g/(s·cm) +* `bit/s`, derived units: `Bd`: bit/s +* `bit`, derived units: `By`: bit*8.0 +* `Sv` +* `N` +* `Ω`, derived units: `Ohm`: Ω +* `T`, derived units: `G`: T/10000.0 +* `sr`, derived units: `sph`: [one?]·sr*4.0 +* `F` +* `C/kg`, derived units: `R`: (C/kg)*2.58E-4 +* `cd/m²`, derived units: `sb`: cd/cm², `Lmb`: cd/([one?]·cm²) +* `Pa`, derived units: `bar`: Pa*100000.0, `atm`: Pa*101325.0 +* `kg/(m·s²)`, derived units: `att`: k(g·(m/s²)*9.80665)/cm² +* `m²/s`, derived units: `St`: cm²/s +* `A/m`, derived units: `Oe`: (A/m)*250.0/[one?] +* `kg·m²/s²`, derived units: `erg`: cm²·g/s² +* `kg/m³`, derived units: `g%`: g/dl +* `mho` +* `V` +* `lx`, derived units: `ph`: lx/10000.0 +* `m/s²`, derived units: `Gal`: cm/s², `m/s2`: m/s² +* `m/s`, derived units: `kn`: m*1852.0/h +* `m·kg/s²`, derived units: `gf`: g·(m/s²)*9.80665, `lbf`: lb·(m/s²)*9.80665, `dyn`: cm·g/s² +* `m²/s²`, derived units: `RAD`: cm²·g/(s²·hg), `REM`: cm²·g/(s²·hg) +* `C` +* `Gy` +* `Hz` +* `H` +* `lm` +* `W` +* `Wb`, derived units: `Mx`: Wb/1.0E8 +* `Bq`, derived units: `Ci`: Bq*3.7E10 +* `S` ## Characteristics + This distance measure is not normalized, i.e., all distances start at 0 (exact match) and increase the more different the values are. Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. 
+ ## Examples **Notation:** List of values are represented via square brackets. Example: `[first, second]` represents a list of two values "first" and "second". @@ -193,49 +192,40 @@ Compares single values (as opposed to sequences of values). If multiple values a **Convert SI unit prefixes:** * Input values: - - Source: `[1 km]` - - Target: `[500 m]` + * Source: `[1 km]` + * Target: `[500 m]` * Returns: `500.0` - --- **Convert imperial and metric values:** * Input values: - - Source: `[1 km]` - - Target: `[1 mi]` + * Source: `[1 km]` + * Target: `[1 mi]` * Returns: `609.344` - --- **Validate if the compared units of measurement are compatible:** * Input values: - - Source: `[1 km]` - - Target: `[1 kg]` + * Source: `[1 km]` + * Target: `[1 kg]` * Returns: `NaN` * **Throws error:** `ValidationException` - - - ## Parameter ### Number format The IETF BCP 47 language tag, e.g., 'en'. -- ID: `numberFormat` -- Datatype: `string` -- Default Value: `en` - - - - +* ID: `numberFormat` +* Datatype: `string` +* Default Value: `en` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/cjkReadingDistance.md b/docs/build/reference/distancemeasure/cjkReadingDistance.md index aa10ccd08..c6bb8b8e9 100644 --- a/docs/build/reference/distancemeasure/cjkReadingDistance.md +++ b/docs/build/reference/distancemeasure/cjkReadingDistance.md @@ -8,11 +8,10 @@ tags: # CJK reading distance - - CJK Reading Distance. ## Characteristics + This distance measure is not normalized, i.e., all distances start at 0 (exact match) and increase the more different the values are. Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. 
@@ -27,8 +26,6 @@ No description - Datatype: `char` - Default Value: `0` - - ### Max char No description @@ -37,10 +34,6 @@ No description - Datatype: `char` - Default Value: `z` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/constantDistance.md b/docs/build/reference/distancemeasure/constantDistance.md index faceb1d54..0e3a7d55d 100644 --- a/docs/build/reference/distancemeasure/constantDistance.md +++ b/docs/build/reference/distancemeasure/constantDistance.md @@ -8,11 +8,10 @@ tags: # Constant similarity value - - Always returns a constant similarity value. ## Characteristics + This distance measure is not normalized, i.e., all distances start at 0 (exact match) and increase the more different the values are. ## Parameter @@ -25,10 +24,6 @@ No description - Datatype: `double` - Default Value: `1.0` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/cosine.md b/docs/build/reference/distancemeasure/cosine.md index 41d2d0b47..f6fa1d447 100644 --- a/docs/build/reference/distancemeasure/cosine.md +++ b/docs/build/reference/distancemeasure/cosine.md @@ -8,11 +8,10 @@ tags: # Cosine - - Cosine Distance Measure. ## Characteristics + This distance measure is normalized, i.e., all distances are between 0 (exact match) and 1 (no similarity). Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. 
@@ -27,10 +26,6 @@ No description - Datatype: `int` - Default Value: `3` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/date.md b/docs/build/reference/distancemeasure/date.md index 8a92a75ee..5f638e95f 100644 --- a/docs/build/reference/distancemeasure/date.md +++ b/docs/build/reference/distancemeasure/date.md @@ -8,14 +8,14 @@ tags: # Date - - The distance in days between two dates ('YYYY-MM-DD' format). ## Characteristics + This distance measure is not normalized, i.e., all distances start at 0 (exact match) and increase the more different the values are. Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. + ## Examples **Notation:** List of values are represented via square brackets. Example: `[first, second]` represents a list of two values "first" and "second". @@ -24,82 +24,74 @@ Compares single values (as opposed to sequences of values). 
If multiple values a **Returns 0 if both dates are equal:** * Input values: - - Source: `[2003-03-01]` - - Target: `[2003-03-01]` + * Source: `[2003-03-01]` + * Target: `[2003-03-01]` * Returns: `0.0` - --- **Returns 1 if both dates are one day apart:** * Input values: - - Source: `[2003-03-01]` - - Target: `[2003-03-02]` + * Source: `[2003-03-01]` + * Target: `[2003-03-02]` * Returns: `1.0` - --- **Returns the number of days if both dates are one month apart:** * Input values: - - Source: `[2003-03-01]` - - Target: `[2003-04-01]` + * Source: `[2003-03-01]` + * Target: `[2003-04-01]` * Returns: `31.0` - --- **Returns the number of days if both dates are one year apart:** * Input values: - - Source: `[2018-03-01]` - - Target: `[2019-03-01]` + * Source: `[2018-03-01]` + * Target: `[2019-03-01]` * Returns: `365.0` - --- **Time of day is ignored:** * Input values: - - Source: `[2003-03-01]` - - Target: `[2003-03-01T06:00:00]` + * Source: `[2003-03-01]` + * Target: `[2003-03-01T06:00:00]` * Returns: `0.0` - --- **Missing days are set to 1 by default:** * Input values: - - Source: `[2003-01]` - - Target: `[2003-01-01]` + * Source: `[2003-01]` + * Target: `[2003-01-01]` * Returns: `0.0` - --- **Missing months are set to 1 by default:** * Input values: - - Source: `[2003]` - - Target: `[2003-01-01]` + * Source: `[2003]` + * Target: `[2003-01-01]` * Returns: `0.0` - --- **Missing months and days are set to 1 by default:** * Input values: - - Source: `[2018]` - - Target: `[2019]` + * Source: `[2018]` + * Target: `[2019]` * Returns: `365.0` - --- **If 'requireMonthAndDay' is set, dates without a day and month will not match:** @@ -107,12 +99,11 @@ Compares single values (as opposed to sequences of values). 
If multiple values a * requireMonthAndDay: `true` * Input values: - - Source: `[2003]` - - Target: `[2003-03-01]` + * Source: `[2003]` + * Target: `[2003-03-01]` * Returns: `Infinity` - --- **If 'requireMonthAndDay' is set, dates without a day will not match:** @@ -120,28 +111,21 @@ Compares single values (as opposed to sequences of values). If multiple values a * requireMonthAndDay: `true` * Input values: - - Source: `[2003-12]` - - Target: `[2003-03-01]` + * Source: `[2003-12]` + * Target: `[2003-03-01]` * Returns: `Infinity` - - - ## Parameter ### Require month and day If true, no distance value will be generated if months or days are missing (e.g., 2019-11). If false, missing month or day fields will default to 1. -- ID: `requireMonthAndDay` -- Datatype: `boolean` -- Default Value: `false` - - - - +* ID: `requireMonthAndDay` +* Datatype: `boolean` +* Default Value: `false` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/dateTime.md b/docs/build/reference/distancemeasure/dateTime.md index 4807fe785..c3530dee0 100644 --- a/docs/build/reference/distancemeasure/dateTime.md +++ b/docs/build/reference/distancemeasure/dateTime.md @@ -8,11 +8,10 @@ tags: # DateTime - - Distance between two date time values (xsd:dateTime format) in seconds. ## Characteristics + This distance measure is not normalized, i.e., all distances start at 0 (exact match) and increase the more different the values are. Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. @@ -23,4 +22,4 @@ Compares single values (as opposed to sequences of values). 
If multiple values a ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/dice.md b/docs/build/reference/distancemeasure/dice.md index b75d2436e..9bc7879aa 100644 --- a/docs/build/reference/distancemeasure/dice.md +++ b/docs/build/reference/distancemeasure/dice.md @@ -8,11 +8,10 @@ tags: # Dice coefficient - - Dice similarity coefficient. ## Characteristics + This distance measure is normalized, i.e., all distances are between 0 (exact match) and 1 (no similarity). Compares sets of multiple values.Typically, incoming values are tokenized before being fed into this measure. @@ -23,4 +22,4 @@ Compares sets of multiple values.Typically, incoming values are tokenized before ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/equality.md b/docs/build/reference/distancemeasure/equality.md index 0d277b759..4e12096fc 100644 --- a/docs/build/reference/distancemeasure/equality.md +++ b/docs/build/reference/distancemeasure/equality.md @@ -8,14 +8,14 @@ tags: # String equality - - Checks for equality of the string representation of the given values. Returns success if string values are equal, failure otherwise. For a numeric comparison of values use the 'Numeric Equality' comparator. ## Characteristics + This is a boolean distance measure, i.e., all distances are either 0 or 1. Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. + ## Examples **Notation:** List of values are represented via square brackets. Example: `[first, second]` represents a list of two values "first" and "second". @@ -24,28 +24,24 @@ Compares single values (as opposed to sequences of values). 
If multiple values a **Returns distance 0, if at least one value matches:** * Input values: - - Source: `[max, helmut]` - - Target: `[max]` + * Source: `[max, helmut]` + * Target: `[max]` * Returns: `0.0` - --- **Returns distance 1, if no value matches:** * Input values: - - Source: `[max, helmut]` - - Target: `[john]` + * Source: `[max, helmut]` + * Target: `[john]` * Returns: `1.0` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/greaterThan.md b/docs/build/reference/distancemeasure/greaterThan.md index ea0d525d0..7dbc9301b 100644 --- a/docs/build/reference/distancemeasure/greaterThan.md +++ b/docs/build/reference/distancemeasure/greaterThan.md @@ -8,11 +8,10 @@ tags: # Greater than - - Checks if the source value is greater than the target value. If both strings are numbers, numerical order is used for comparison. Otherwise, alphanumerical order is used. ## Characteristics + This is a boolean distance measure, i.e., all distances are either 0 or 1. Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. @@ -27,8 +26,6 @@ Accept equal values - Datatype: `boolean` - Default Value: `false` - - ### Order Per default, if both strings are numbers, numerical order is used for comparison. Otherwise, alphanumerical order is used. Choose a more specific order for improved performance. 
@@ -37,10 +34,6 @@ Per default, if both strings are numbers, numerical order is used for comparison - Datatype: `enumeration` - Default Value: `Autodetect` - - - - ## Advanced Parameter ### Reverse @@ -50,6 +43,3 @@ Reverse source and target inputs - ID: `reverse` - Datatype: `boolean` - Default Value: `false` - - - diff --git a/docs/build/reference/distancemeasure/inequality.md b/docs/build/reference/distancemeasure/inequality.md index 1b58a1011..99a305aa7 100644 --- a/docs/build/reference/distancemeasure/inequality.md +++ b/docs/build/reference/distancemeasure/inequality.md @@ -8,14 +8,14 @@ tags: # Inequality - - Returns success if values are not equal, failure otherwise. ## Characteristics + This is a boolean distance measure, i.e., all distances are either 0 or 1. Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. + ## Examples **Notation:** List of values are represented via square brackets. Example: `[first, second]` represents a list of two values "first" and "second". @@ -24,48 +24,42 @@ Compares single values (as opposed to sequences of values). 
If multiple values a **Returns distance 0, if the values are different:** * Input values: - - Source: `[max]` - - Target: `[john]` + * Source: `[max]` + * Target: `[john]` * Returns: `0.0` - --- **Returns distance 1, if the values are equal:** * Input values: - - Source: `[max]` - - Target: `[max]` + * Source: `[max]` + * Target: `[max]` * Returns: `1.0` - --- **If multiple values are provided, returns 0, if at least one value does not match:** * Input values: - - Source: `[max, helmut]` - - Target: `[max]` + * Source: `[max, helmut]` + * Target: `[max]` * Returns: `0.0` - --- **If multiple values are provided, returns 1, if all value match:** * Input values: - - Source: `[max, max]` - - Target: `[max, max]` + * Source: `[max, max]` + * Target: `[max, max]` * Returns: `1.0` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/insideNumericInterval.md b/docs/build/reference/distancemeasure/insideNumericInterval.md index 01eb235da..99f49114f 100644 --- a/docs/build/reference/distancemeasure/insideNumericInterval.md +++ b/docs/build/reference/distancemeasure/insideNumericInterval.md @@ -8,11 +8,10 @@ tags: # Inside numeric interval - - Checks if a number is contained inside a numeric interval, such as '1900 - 2000'. ## Characteristics + This is a boolean distance measure, i.e., all distances are either 0 or 1. Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. 
@@ -27,10 +26,6 @@ No description - Datatype: `string` - Default Value: `—|–|-` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/isSubstring.md b/docs/build/reference/distancemeasure/isSubstring.md index 9527bfb1c..3383cd6c7 100644 --- a/docs/build/reference/distancemeasure/isSubstring.md +++ b/docs/build/reference/distancemeasure/isSubstring.md @@ -8,11 +8,10 @@ tags: # Is substring - - Checks if a source value is a substring of a target value. ## Characteristics + This is a boolean distance measure, i.e., all distances are either 0 or 1. Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. @@ -27,10 +26,6 @@ Reverse source and target inputs - Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/jaccard.md b/docs/build/reference/distancemeasure/jaccard.md index 53d67e222..940910c19 100644 --- a/docs/build/reference/distancemeasure/jaccard.md +++ b/docs/build/reference/distancemeasure/jaccard.md @@ -8,14 +8,14 @@ tags: # Jaccard - - Jaccard similarity coefficient. Divides the matching tokens by the number of distinct tokens from both inputs. ## Characteristics + This distance measure is normalized, i.e., all distances are between 0 (exact match) and 1 (no similarity). Compares sets of multiple values.Typically, incoming values are tokenized before being fed into this measure. + ## Examples **Notation:** List of values are represented via square brackets. Example: `[first, second]` represents a list of two values "first" and "second". 
@@ -24,48 +24,42 @@ Compares sets of multiple values.Typically, incoming values are tokenized before **Returns 0 for equal sets of values:** * Input values: - - Source: `[A, B, C]` - - Target: `[B, C, A]` + * Source: `[A, B, C]` + * Target: `[B, C, A]` * Returns: `0.0` - --- **Returns 1 if there is no overlap between both sets of tokens:** * Input values: - - Source: `[A, B, C]` - - Target: `[D, E, F]` + * Source: `[A, B, C]` + * Target: `[D, E, F]` * Returns: `1.0` - --- **Returns 0.5 if half of all unique tokens overlap:** * Input values: - - Source: `[A, B, C]` - - Target: `[A, B, D]` + * Source: `[A, B, C]` + * Target: `[A, B, D]` * Returns: `0.5` - --- **Returns 2/3 if one third of all unique tokens overlap:** * Input values: - - Source: `[John, Jane]` - - Target: `[John, Max]` + * Source: `[John, Jane]` + * Target: `[John, Max]` * Returns: `0.6666666666666666` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/jaro.md b/docs/build/reference/distancemeasure/jaro.md index d8e5cabe2..31faf8587 100644 --- a/docs/build/reference/distancemeasure/jaro.md +++ b/docs/build/reference/distancemeasure/jaro.md @@ -8,13 +8,12 @@ tags: # Jaro distance - - -The Jaro distance measure calculates the similarity between two strings based on the number and order of common characters, the number of transpositions, and the length of the strings. The Jaro distance is 0 for a perfect match and 1 if there is no similarity between the given strings. +The Jaro distance measure calculates the similarity between two strings based on the number and order of common characters, the number of transpositions, and the length of the strings. The Jaro distance is 0 for a perfect match and 1 if there is no similarity between the given strings. For more information, please refer to: [https://en.wikipedia.org/wiki/Jaro–Winkler_distance](https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance). 
## Characteristics + This distance measure is normalized, i.e., all distances are between 0 (exact match) and 1 (no similarity). Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. @@ -25,4 +24,4 @@ Compares single values (as opposed to sequences of values). If multiple values a ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/jaroWinkler.md b/docs/build/reference/distancemeasure/jaroWinkler.md index e56b45a3b..d62628766 100644 --- a/docs/build/reference/distancemeasure/jaroWinkler.md +++ b/docs/build/reference/distancemeasure/jaroWinkler.md @@ -8,13 +8,12 @@ tags: # Jaro-Winkler distance - - The Jaro-Winkler distance measure is a variation of the Jaro distance metric. It takes into account the prefixes of the strings being compared and assigns higher weights to matching prefixes. For more information, please refer to: [https://en.wikipedia.org/wiki/Jaro–Winkler_distance](https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance). ## Characteristics + This distance measure is normalized, i.e., all distances are between 0 (exact match) and 1 (no similarity). Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. @@ -25,4 +24,4 @@ Compares single values (as opposed to sequences of values). If multiple values a ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/koreanPhonemeDistance.md b/docs/build/reference/distancemeasure/koreanPhonemeDistance.md index dce4c5bdd..4e8e5d887 100644 --- a/docs/build/reference/distancemeasure/koreanPhonemeDistance.md +++ b/docs/build/reference/distancemeasure/koreanPhonemeDistance.md @@ -8,11 +8,10 @@ tags: # Korean phoneme distance - - Korean phoneme distance. 
## Characteristics + This distance measure is not normalized, i.e., all distances start at 0 (exact match) and increase the more different the values are. Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. @@ -27,8 +26,6 @@ No description - Datatype: `char` - Default Value: `0` - - ### Max char No description @@ -37,10 +34,6 @@ No description - Datatype: `char` - Default Value: `z` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/koreanTranslitDistance.md b/docs/build/reference/distancemeasure/koreanTranslitDistance.md index 1000cf128..23f574860 100644 --- a/docs/build/reference/distancemeasure/koreanTranslitDistance.md +++ b/docs/build/reference/distancemeasure/koreanTranslitDistance.md @@ -8,11 +8,10 @@ tags: # Korean translit distance - - Transliterated Korean distance. ## Characteristics + This distance measure is not normalized, i.e., all distances start at 0 (exact match) and increase the more different the values are. Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. @@ -27,8 +26,6 @@ No description - Datatype: `char` - Default Value: `0` - - ### Max char No description @@ -37,10 +34,6 @@ No description - Datatype: `char` - Default Value: `z` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/levenshtein.md b/docs/build/reference/distancemeasure/levenshtein.md index f58a89d44..e33687daa 100644 --- a/docs/build/reference/distancemeasure/levenshtein.md +++ b/docs/build/reference/distancemeasure/levenshtein.md @@ -8,14 +8,14 @@ tags: # Normalized Levenshtein distance - - Normalized Levenshtein distance. Divides the edit distance by the length of the longer string. 
## Characteristics + This distance measure is normalized, i.e., all distances are between 0 (exact match) and 1 (no similarity). Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. + ## Examples **Notation:** List of values are represented via square brackets. Example: `[first, second]` represents a list of two values "first" and "second". @@ -24,44 +24,38 @@ Compares single values (as opposed to sequences of values). If multiple values a **Returns 0 for equal strings:** * Input values: - - Source: `[John]` - - Target: `[John]` + * Source: `[John]` + * Target: `[John]` * Returns: `0.0` - --- **Returns 1/4 if two strings of length 4 differ by one edit operation:** * Input values: - - Source: `[John]` - - Target: `[Jxhn]` + * Source: `[John]` + * Target: `[Jxhn]` * Returns: `0.25` - --- **Normalizes the edit distance by the length of the longer string:** * Input values: - - Source: `[John]` - - Target: `[Jhn]` + * Source: `[John]` + * Target: `[Jhn]` * Returns: `0.25` - --- **Returns the maximum distance of 1 for completely different strings:** * Input values: - - Source: `[John]` - - Target: `[Clara]` + * Source: `[John]` + * Target: `[Clara]` * Returns: `1.0` - - - ## Parameter `None` @@ -72,29 +66,22 @@ Compares single values (as opposed to sequences of values). If multiple values a The size of the q-grams to be indexed. Setting this to zero will disable indexing. 
-- ID: `qGramsSize` -- Datatype: `int` -- Default Value: `2` - - +* ID: `qGramsSize` +* Datatype: `int` +* Default Value: `2` ### Min char The minimum character that is used for indexing -- ID: `minChar` -- Datatype: `char` -- Default Value: `0` - - +* ID: `minChar` +* Datatype: `char` +* Default Value: `0` ### Max char The maximum character that is used for indexing -- ID: `maxChar` -- Datatype: `char` -- Default Value: `z` - - - +* ID: `maxChar` +* Datatype: `char` +* Default Value: `z` diff --git a/docs/build/reference/distancemeasure/levenshteinDistance.md b/docs/build/reference/distancemeasure/levenshteinDistance.md index 51dfe24e3..97dc0b389 100644 --- a/docs/build/reference/distancemeasure/levenshteinDistance.md +++ b/docs/build/reference/distancemeasure/levenshteinDistance.md @@ -8,14 +8,14 @@ tags: # Levenshtein distance - - Levenshtein distance. Returns a distance value between zero and the size of the string. ## Characteristics + This distance measure is not normalized, i.e., all distances start at 0 (exact match) and increase the more different the values are. Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. + ## Examples **Notation:** List of values are represented via square brackets. Example: `[first, second]` represents a list of two values "first" and "second". @@ -24,34 +24,29 @@ Compares single values (as opposed to sequences of values). 
If multiple values a **Returns 0 for equal strings:** * Input values: - - Source: `[John]` - - Target: `[John]` + * Source: `[John]` + * Target: `[John]` * Returns: `0.0` - --- **Returns 1 for strings that differ by one edit operation:** * Input values: - - Source: `[John]` - - Target: `[Jxhn]` + * Source: `[John]` + * Target: `[Jxhn]` * Returns: `1.0` - --- **Returns 3 for strings that differ by three edit operations:** * Input values: - - Source: `[Saturday]` - - Target: `[Sunday]` + * Source: `[Saturday]` + * Target: `[Sunday]` * Returns: `3.0` - - - ## Parameter `None` @@ -62,29 +57,22 @@ Compares single values (as opposed to sequences of values). If multiple values a The size of the q-grams to be indexed. Setting this to zero will disable indexing. -- ID: `qGramsSize` -- Datatype: `int` -- Default Value: `2` - - +* ID: `qGramsSize` +* Datatype: `int` +* Default Value: `2` ### Min char The minimum character that is used for indexing -- ID: `minChar` -- Datatype: `char` -- Default Value: `0` - - +* ID: `minChar` +* Datatype: `char` +* Default Value: `0` ### Max char The maximum character that is used for indexing -- ID: `maxChar` -- Datatype: `char` -- Default Value: `z` - - - +* ID: `maxChar` +* Datatype: `char` +* Default Value: `z` diff --git a/docs/build/reference/distancemeasure/lowerThan.md b/docs/build/reference/distancemeasure/lowerThan.md index f4e005772..3633149d0 100644 --- a/docs/build/reference/distancemeasure/lowerThan.md +++ b/docs/build/reference/distancemeasure/lowerThan.md @@ -8,11 +8,10 @@ tags: # Lower than - - Checks if the source value is lower than the target value. ## Characteristics + This is a boolean distance measure, i.e., all distances are either 0 or 1. Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. 
@@ -27,8 +26,6 @@ Accept equal values - Datatype: `boolean` - Default Value: `false` - - ### Order Per default, if both strings are numbers, numerical order is used for comparison. Otherwise, alphanumerical order is used. Choose a more specific order for improved performance. @@ -37,10 +34,6 @@ Per default, if both strings are numbers, numerical order is used for comparison - Datatype: `enumeration` - Default Value: `Autodetect` - - - - ## Advanced Parameter ### Reverse @@ -50,6 +43,3 @@ Reverse source and target inputs - ID: `reverse` - Datatype: `boolean` - Default Value: `false` - - - diff --git a/docs/build/reference/distancemeasure/num.md b/docs/build/reference/distancemeasure/num.md index 8e4802032..bae92bb7e 100644 --- a/docs/build/reference/distancemeasure/num.md +++ b/docs/build/reference/distancemeasure/num.md @@ -8,11 +8,10 @@ tags: # Numeric similarity - - Computes the numeric distance between two numbers. ## Characteristics + This distance measure is not normalized, i.e., all distances start at 0 (exact match) and increase the more different the values are. Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. @@ -31,8 +30,6 @@ The minimum number that is used for indexing - Datatype: `double` - Default Value: `-Infinity` - - ### Max index value The maximum number that is used for indexing @@ -40,6 +37,3 @@ The maximum number that is used for indexing - ID: `maxValue` - Datatype: `double` - Default Value: `Infinity` - - - diff --git a/docs/build/reference/distancemeasure/numericEquality.md b/docs/build/reference/distancemeasure/numericEquality.md index ae1347392..c667a72a1 100644 --- a/docs/build/reference/distancemeasure/numericEquality.md +++ b/docs/build/reference/distancemeasure/numericEquality.md @@ -8,16 +8,16 @@ tags: # Numeric equality - - Compares values numerically instead of their string representation as the 'String Equality' operator does. 
Allows to set the needed precision of the comparison. A value of 0.0 means that the values must represent exactly the same (floating point) value, values higher than that allow for a margin of tolerance. ## Characteristics + This is a boolean distance measure, i.e., all distances are either 0 or 1. Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. + ## Examples **Notation:** List of values are represented via square brackets. Example: `[first, second]` represents a list of two values "first" and "second". @@ -26,22 +26,20 @@ Compares single values (as opposed to sequences of values). If multiple values a **Returns 0 for equal numbers:** * Input values: - - Source: `[4.2]` - - Target: `[4.2]` + * Source: `[4.2]` + * Target: `[4.2]` * Returns: `0.0` - --- **Returns 1 if at least one value is not a number:** * Input values: - - Source: `[1]` - - Target: `[one]` + * Source: `[1]` + * Target: `[one]` * Returns: `1.0` - --- **Returns 0 for numbers within the configured precision:** @@ -49,12 +47,11 @@ Compares single values (as opposed to sequences of values). If multiple values a * precision: `0.1` * Input values: - - Source: `[1.3]` - - Target: `[1.35]` + * Source: `[1.3]` + * Target: `[1.35]` * Returns: `0.0` - --- **Returns 1 for numbers outside the configured precision:** @@ -62,28 +59,21 @@ Compares single values (as opposed to sequences of values). If multiple values a * precision: `0.1` * Input values: - - Source: `[1.3]` - - Target: `[1.5]` + * Source: `[1.3]` + * Target: `[1.5]` * Returns: `1.0` - - - ## Parameter ### Precision The range of tolerance in floating point number comparisons. Must be 0 or a non-negative number smaller than 1. 
-- ID: `precision` -- Datatype: `double` -- Default Value: `0.0` - - - - +* ID: `precision` +* Datatype: `double` +* Default Value: `0.0` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/qGrams.md b/docs/build/reference/distancemeasure/qGrams.md index 2ee49cadc..4998c64a9 100644 --- a/docs/build/reference/distancemeasure/qGrams.md +++ b/docs/build/reference/distancemeasure/qGrams.md @@ -8,14 +8,14 @@ tags: # qGrams - - String similarity based on q-grams (by default q=2). ## Characteristics + This distance measure is normalized, i.e., all distances are between 0 (exact match) and 1 (no similarity). Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. + ## Examples **Notation:** List of values are represented via square brackets. Example: `[first, second]` represents a list of two values "first" and "second". @@ -24,47 +24,38 @@ Compares single values (as opposed to sequences of values). If multiple values a **Returns 0.0 if the input strings are equal:** * Input values: - - Source: `[abcd]` - - Target: `[abcd]` + * Source: `[abcd]` + * Target: `[abcd]` * Returns: `0.0` - --- **Returns 1.0 if the input strings do not share a single q-gram:** * Input values: - - Source: `[abcd]` - - Target: `[dcba]` + * Source: `[abcd]` + * Target: `[dcba]` * Returns: `1.0` - --- **Returns 1 minus the matching q-grams divided by the total number of q-grams. 
Generated q-grams in this example: (#a, ab, b#) and (#a, ac, c#):** * Input values: - - Source: `[ab]` - - Target: `[ac]` + * Source: `[ab]` + * Target: `[ac]` * Returns: `0.8` - - - ## Parameter ### Q No description -- ID: `q` -- Datatype: `int` -- Default Value: `2` - - - - +* ID: `q` +* Datatype: `int` +* Default Value: `2` ## Advanced Parameter @@ -72,19 +63,14 @@ No description The minimum character that is used for indexing -- ID: `minChar` -- Datatype: `char` -- Default Value: `0` - - +* ID: `minChar` +* Datatype: `char` +* Default Value: `0` ### Max char The maximum character that is used for indexing -- ID: `maxChar` -- Datatype: `char` -- Default Value: `z` - - - +* ID: `maxChar` +* Datatype: `char` +* Default Value: `z` diff --git a/docs/build/reference/distancemeasure/relaxedEquality.md b/docs/build/reference/distancemeasure/relaxedEquality.md index 6c826fcf2..65510d05c 100644 --- a/docs/build/reference/distancemeasure/relaxedEquality.md +++ b/docs/build/reference/distancemeasure/relaxedEquality.md @@ -8,11 +8,10 @@ tags: # Relaxed equality - - Return success if strings are equal, failure otherwise. Lower/upper case and differences like ö/o, n/ñ, c/ç etc. are treated as equal. ## Characteristics + This is a boolean distance measure, i.e., all distances are either 0 or 1. Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. @@ -23,4 +22,4 @@ Compares single values (as opposed to sequences of values). If multiple values a ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/softjaccard.md b/docs/build/reference/distancemeasure/softjaccard.md index 9bff4ecc9..6e32f53ca 100644 --- a/docs/build/reference/distancemeasure/softjaccard.md +++ b/docs/build/reference/distancemeasure/softjaccard.md @@ -8,11 +8,10 @@ tags: # Soft Jaccard - - Soft Jaccard similarity coefficient. 
Same as Jaccard distance but values within an levenhstein distance of 'maxDistance' are considered equivalent. ## Characteristics + This distance measure is normalized, i.e., all distances are between 0 (exact match) and 1 (no similarity). Compares sets of multiple values.Typically, incoming values are tokenized before being fed into this measure. @@ -27,10 +26,6 @@ No description - Datatype: `int` - Default Value: `1` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/startsWith.md b/docs/build/reference/distancemeasure/startsWith.md index 4edc7fe77..bafb6777f 100644 --- a/docs/build/reference/distancemeasure/startsWith.md +++ b/docs/build/reference/distancemeasure/startsWith.md @@ -8,11 +8,10 @@ tags: # Starts with - - Returns success if the first string starts with the second string, failure otherwise. ## Characteristics + This is a boolean distance measure, i.e., all distances are either 0 or 1. Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. @@ -27,8 +26,6 @@ Reverse source and target values - Datatype: `boolean` - Default Value: `false` - - ### Min length The minimum length of the string being contained. @@ -37,8 +34,6 @@ The minimum length of the string being contained. - Datatype: `int` - Default Value: `2` - - ### Max length The potential maximum length of the strings that must match. If the max length is greater than the length of the string to match, the full string must match. @@ -47,10 +42,6 @@ The potential maximum length of the strings that must match. 
If the max length i - Datatype: `int` - Default Value: `2147483647` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/substringDistance.md b/docs/build/reference/distancemeasure/substringDistance.md index c7ba80957..11d5105aa 100644 --- a/docs/build/reference/distancemeasure/substringDistance.md +++ b/docs/build/reference/distancemeasure/substringDistance.md @@ -8,11 +8,10 @@ tags: # Substring comparison - - Return 0 to 1 for strong similarity to weak similarity. Based on the paper: Stoilos, Giorgos, Giorgos Stamou, and Stefanos Kollias. "A string metric for ontology alignment." The Semantic Web-ISWC 2005. Springer Berlin Heidelberg, 2005. 624-637. ## Characteristics + This distance measure is normalized, i.e., all distances are between 0 (exact match) and 1 (no similarity). Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. @@ -27,10 +26,6 @@ The minimum length of a possible substring match. - Datatype: `string` - Default Value: `3` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/distancemeasure/tokenwiseDistance.md b/docs/build/reference/distancemeasure/tokenwiseDistance.md index e8111ef8c..34976d55f 100644 --- a/docs/build/reference/distancemeasure/tokenwiseDistance.md +++ b/docs/build/reference/distancemeasure/tokenwiseDistance.md @@ -8,11 +8,10 @@ tags: # Token-wise distance - - Token-wise string distance using the specified metric. ## Characteristics + This distance measure is normalized, i.e., all distances are between 0 (exact match) and 1 (no similarity). Compares single values (as opposed to sequences of values). If multiple values are provided, all values are compared and the lowest distance is returned. 
@@ -27,8 +26,6 @@ No description - Datatype: `boolean` - Default Value: `true` - - ### Metric name No description @@ -37,8 +34,6 @@ No description - Datatype: `string` - Default Value: `levenshtein` - - ### Split regex No description @@ -47,8 +42,6 @@ No description - Datatype: `string` - Default Value: `[\s\d\p{Punct}]+` - - ### Stopwords No description @@ -57,8 +50,6 @@ No description - Datatype: `string` - Default Value: `None` - - ### Match threshold No description @@ -67,8 +58,6 @@ No description - Datatype: `double` - Default Value: `0.0` - - ### Ordering impact No description @@ -77,8 +66,6 @@ No description - Datatype: `double` - Default Value: `0.0` - - ### Adjust by token length No description @@ -87,10 +74,6 @@ No description - Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter ### Stopword weight @@ -101,8 +84,6 @@ Weight assigned to stopwords - Datatype: `double` - Default Value: `0.01` - - ### Non stopword weight Weight assigned to non-stopwords @@ -111,8 +92,6 @@ Weight assigned to non-stopwords - Datatype: `double` - Default Value: `0.1` - - ### Use incremental idf weights Use incremental IDF weights @@ -120,6 +99,3 @@ Use incremental IDF weights - ID: `useIncrementalIdfWeights` - Datatype: `boolean` - Default Value: `false` - - - diff --git a/docs/build/reference/distancemeasure/wgs84.md b/docs/build/reference/distancemeasure/wgs84.md index a57fd8578..64296411c 100644 --- a/docs/build/reference/distancemeasure/wgs84.md +++ b/docs/build/reference/distancemeasure/wgs84.md @@ -8,11 +8,10 @@ tags: # Geographical distance - - Computes the geographical distance between two points. Author: Konrad Höffner (MOLE subgroup of Research Group AKSW, University of Leipzig) ## Characteristics + This distance measure is not normalized, i.e., all distances start at 0 (exact match) and increase the more different the values are. Compares single values (as opposed to sequences of values). 
If multiple values are provided, all values are compared and the lowest distance is returned. @@ -27,10 +26,6 @@ No description - Datatype: `string` - Default Value: `km` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/index.md b/docs/build/reference/index.md index 3aa61a3e1..03ec5cb90 100644 --- a/docs/build/reference/index.md +++ b/docs/build/reference/index.md @@ -12,34 +12,34 @@ tags:
-- [Aggregators](aggregator/index.md) +- [Aggregators](aggregator/index.md) --- This kind of task aggregates multiple similarity scores. -- [Custom Workflow Tasks](customtask/index.md) +- [Custom Workflow Tasks](customtask/index.md) --- An operator that can be used in a workflow. -- [Datasets](dataset/index.md) +- [Datasets](dataset/index.md) --- A collection of data that can be read or written. -- [Distance Measures](distancemeasure/index.md) +- [Distance Measures](distancemeasure/index.md) --- Computes the distance between two sets of strings. -- [Transformer](transformer/index.md) +- [Transformer](transformer/index.md) --- Transforms a sequence of string values. -
\ No newline at end of file + diff --git a/docs/build/reference/transformer/Combine/concat.md b/docs/build/reference/transformer/Combine/concat.md index d90e9892a..6287f4d79 100644 --- a/docs/build/reference/transformer/Combine/concat.md +++ b/docs/build/reference/transformer/Combine/concat.md @@ -8,8 +8,6 @@ tags: # Concatenate - - Concatenates strings from multiple inputs. ## Examples @@ -21,7 +19,6 @@ Concatenates strings from multiple inputs. * Returns: `[]` - --- **Example 2:** @@ -30,7 +27,6 @@ Concatenates strings from multiple inputs. * Returns: `[a]` - --- **Example 3:** @@ -40,7 +36,6 @@ Concatenates strings from multiple inputs. * Returns: `[ab]` - --- **Example 4:** @@ -53,7 +48,6 @@ Concatenates strings from multiple inputs. * Returns: `[First-Last]` - --- **Example 5:** @@ -66,7 +60,6 @@ Concatenates strings from multiple inputs. * Returns: `[First-Second, First-Third]` - --- **Example 6:** @@ -80,7 +73,6 @@ Concatenates strings from multiple inputs. * Returns: `[First--Second]` - --- **Example 7:** @@ -94,7 +86,6 @@ Concatenates strings from multiple inputs. * Returns: `[]` - --- **Example 8:** @@ -109,7 +100,6 @@ Concatenates strings from multiple inputs. * Returns: `[First--Second]` - --- **Example 9:** @@ -120,13 +110,13 @@ Concatenates strings from multiple inputs. 1. `[First]` 2. `[Second]` -* Returns: +* Returns: + ``` [First Second] ``` - --- **Example 10:** @@ -137,10 +127,7 @@ Concatenates strings from multiple inputs. 1. `[First]` 2. `[Second]` -* Returns: `[First \\aSecond]` - - - +* Returns: `[First \\aSecond]` ## Parameter @@ -148,24 +135,18 @@ Concatenates strings from multiple inputs. Separator to be inserted between two concatenated strings. The text can contain escaped characters \n, \t and \\ that are replaced by a newline, tab or backslash respectively. 
-- ID: `glue` -- Datatype: `string` -- Default Value: `None` - - +* ID: `glue` +* Datatype: `string` +* Default Value: `None` ### Missing values as empty strings Handle missing values as empty strings. -- ID: `missingValuesAsEmptyStrings` -- Datatype: `boolean` -- Default Value: `false` - - - - +* ID: `missingValuesAsEmptyStrings` +* Datatype: `boolean` +* Default Value: `false` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Combine/concatMultiValues.md b/docs/build/reference/transformer/Combine/concatMultiValues.md index ce3e33c63..527583536 100644 --- a/docs/build/reference/transformer/Combine/concatMultiValues.md +++ b/docs/build/reference/transformer/Combine/concatMultiValues.md @@ -8,8 +8,6 @@ tags: # Concatenate multiple values - - Concatenates multiple values received for an input. If applied to multiple inputs, yields at most one value per input. Optionally removes duplicate values. ## Examples @@ -21,7 +19,6 @@ Concatenates multiple values received for an input. If applied to multiple input * Returns: `[]` - --- **Example 2:** @@ -30,7 +27,6 @@ Concatenates multiple values received for an input. If applied to multiple input * Returns: `[a]` - --- **Example 3:** @@ -39,7 +35,6 @@ Concatenates multiple values received for an input. If applied to multiple input * Returns: `[ab]` - --- **Example 4:** @@ -51,7 +46,6 @@ Concatenates multiple values received for an input. If applied to multiple input * Returns: `[axb]` - --- **Example 5:** @@ -61,7 +55,6 @@ Concatenates multiple values received for an input. If applied to multiple input * Returns: `[ab, 12]` - --- **Example 6:** @@ -69,46 +62,39 @@ Concatenates multiple values received for an input. If applied to multiple input * glue: `\n\t\\` * Input values: - 1. + 1. 
+ ``` [a - \b, c] + \b, c] ``` -* Returns: +* Returns: + ``` [a - \b - \c] + \b + \c] ``` - - - ## Parameter ### Glue No description -- ID: `glue` -- Datatype: `string` -- Default Value: `None` - - +* ID: `glue` +* Datatype: `string` +* Default Value: `None` ### Remove duplicates No description -- ID: `removeDuplicates` -- Datatype: `boolean` -- Default Value: `false` - - - - +* ID: `removeDuplicates` +* Datatype: `boolean` +* Default Value: `false` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Combine/concatPairwise.md b/docs/build/reference/transformer/Combine/concatPairwise.md index 3a073b5fe..fa98976e2 100644 --- a/docs/build/reference/transformer/Combine/concatPairwise.md +++ b/docs/build/reference/transformer/Combine/concatPairwise.md @@ -8,8 +8,6 @@ tags: # Concatenate pairwise - - Concatenates the values of multiple inputs pairwise. ## Examples @@ -25,7 +23,6 @@ Concatenates the values of multiple inputs pairwise. * Returns: `[a1, b2, c3]` - --- **More than two inputs are supported as well:** @@ -36,7 +33,6 @@ Concatenates the values of multiple inputs pairwise. * Returns: `[a1x, b2y, c3z]` - --- **If one of the inputs has more values than the other, its remaining values are ignored:** @@ -46,13 +42,11 @@ Concatenates the values of multiple inputs pairwise. * Returns: `[a1, b2]` - --- **Empty input leads to empty output:** * Returns: `[]` - --- **A single input is just forwarded:** @@ -61,23 +55,16 @@ Concatenates the values of multiple inputs pairwise. * Returns: `[a]` - - - ## Parameter ### Glue Separator to be inserted between two concatenated strings. The text can contain escaped characters \n, \t and \\ that are replaced by a newline, tab or backslash respectively. 
-- ID: `glue` -- Datatype: `string` -- Default Value: `None` - - - - +* ID: `glue` +* Datatype: `string` +* Default Value: `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Combine/merge.md b/docs/build/reference/transformer/Combine/merge.md index afaec47d0..4bcd5cefe 100644 --- a/docs/build/reference/transformer/Combine/merge.md +++ b/docs/build/reference/transformer/Combine/merge.md @@ -8,8 +8,6 @@ tags: # Merge - - Merges the values of all inputs. ## Examples @@ -21,7 +19,6 @@ Merges the values of all inputs. * Returns: `[]` - --- **Example 2:** @@ -31,13 +28,10 @@ Merges the values of all inputs. * Returns: `[a, b, c]` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Conditional/containsAllOf.md b/docs/build/reference/transformer/Conditional/containsAllOf.md index e5811f742..041e3336a 100644 --- a/docs/build/reference/transformer/Conditional/containsAllOf.md +++ b/docs/build/reference/transformer/Conditional/containsAllOf.md @@ -8,8 +8,6 @@ tags: # Contains all of - - Accepts two inputs. If the first input contains all of the second input values it returns 'true', else 'false' is returned. ## Examples @@ -25,7 +23,6 @@ Accepts two inputs. If the first input contains all of the second input values i * Returns: `[true]` - --- **Example 2:** @@ -35,7 +32,6 @@ Accepts two inputs. If the first input contains all of the second input values i * Returns: `[false]` - --- **Example 3:** @@ -45,7 +41,6 @@ Accepts two inputs. If the first input contains all of the second input values i * Returns: `[false]` - --- **Example 4:** @@ -55,7 +50,6 @@ Accepts two inputs. If the first input contains all of the second input values i * Returns: `[true]` - --- **Example 5:** @@ -66,7 +60,6 @@ Accepts two inputs. 
If the first input contains all of the second input values i * Returns: `[]` * **Throws error:** `IllegalArgumentException` - --- **Example 6:** @@ -78,7 +71,6 @@ Accepts two inputs. If the first input contains all of the second input values i * Returns: `[]` * **Throws error:** `IllegalArgumentException` - --- **Example 7:** @@ -88,13 +80,10 @@ Accepts two inputs. If the first input contains all of the second input values i * Returns: `[]` * **Throws error:** `IllegalArgumentException` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Conditional/containsAnyOf.md b/docs/build/reference/transformer/Conditional/containsAnyOf.md index a57df7283..57a04e1e7 100644 --- a/docs/build/reference/transformer/Conditional/containsAnyOf.md +++ b/docs/build/reference/transformer/Conditional/containsAnyOf.md @@ -8,8 +8,6 @@ tags: # Contains any of - - Accepts two inputs. If the first input contains any of the second input values it returns 'true', else 'false' is returned. ## Examples @@ -25,7 +23,6 @@ Accepts two inputs. If the first input contains any of the second input values i * Returns: `[true]` - --- **Example 2:** @@ -35,7 +32,6 @@ Accepts two inputs. If the first input contains any of the second input values i * Returns: `[true]` - --- **Example 3:** @@ -45,7 +41,6 @@ Accepts two inputs. If the first input contains any of the second input values i * Returns: `[false]` - --- **Example 4:** @@ -55,7 +50,6 @@ Accepts two inputs. If the first input contains any of the second input values i * Returns: `[true]` - --- **Example 5:** @@ -66,7 +60,6 @@ Accepts two inputs. If the first input contains any of the second input values i * Returns: `[]` * **Throws error:** `IllegalArgumentException` - --- **Example 6:** @@ -78,7 +71,6 @@ Accepts two inputs. 
If the first input contains any of the second input values i * Returns: `[]` * **Throws error:** `IllegalArgumentException` - --- **Example 7:** @@ -88,13 +80,10 @@ Accepts two inputs. If the first input contains any of the second input values i * Returns: `[]` * **Throws error:** `IllegalArgumentException` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Conditional/ifContains.md b/docs/build/reference/transformer/Conditional/ifContains.md index c53abc8c0..2049b2ded 100644 --- a/docs/build/reference/transformer/Conditional/ifContains.md +++ b/docs/build/reference/transformer/Conditional/ifContains.md @@ -8,8 +8,6 @@ tags: # If contains - - Accepts two or three inputs. If the first input contains the given value, the second input is forwarded. Otherwise, the third input is forwarded (if present). ## Examples @@ -28,7 +26,6 @@ Accepts two or three inputs. If the first input contains the given value, the se * Returns: `[this is a match]` - --- **Example 2:** @@ -41,7 +38,6 @@ Accepts two or three inputs. If the first input contains the given value, the se * Returns: `[]` - --- **Example 3:** @@ -55,23 +51,16 @@ Accepts two or three inputs. If the first input contains the given value, the se * Returns: `[this is no match]` - - - ## Parameter ### Search No description -- ID: `search` -- Datatype: `string` -- Default Value: `None` - - - - +* ID: `search` +* Datatype: `string` +* Default Value: `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Conditional/ifExists.md b/docs/build/reference/transformer/Conditional/ifExists.md index 8c554b52a..70f8f4b5c 100644 --- a/docs/build/reference/transformer/Conditional/ifExists.md +++ b/docs/build/reference/transformer/Conditional/ifExists.md @@ -8,8 +8,6 @@ tags: # If exists - - Accepts two or three inputs. If the first input provides a value, the second input is forwarded. 
Otherwise, the third input is forwarded (if present). ## Examples @@ -26,7 +24,6 @@ Accepts two or three inputs. If the first input provides a value, the second inp * Returns: `[yes]` - --- **Example 2:** @@ -37,7 +34,6 @@ Accepts two or three inputs. If the first input provides a value, the second inp * Returns: `[no]` - --- **Example 3:** @@ -47,13 +43,10 @@ Accepts two or three inputs. If the first input provides a value, the second inp * Returns: `[]` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Conditional/ifMatchesRegex.md b/docs/build/reference/transformer/Conditional/ifMatchesRegex.md index 9796d750c..ffa63508f 100644 --- a/docs/build/reference/transformer/Conditional/ifMatchesRegex.md +++ b/docs/build/reference/transformer/Conditional/ifMatchesRegex.md @@ -8,8 +8,6 @@ tags: # If matches regex - - ## Description The `ifMatchesRegex` plugin uses a regular expression as a matching condition in order to distinguish which input to @@ -85,7 +83,6 @@ take, `validateRegex` is used for _validating_ the input, `regexReplace` _replac * Returns: `[should be taken]` - --- **returns the third input if the regex does not match the first input:** @@ -100,7 +97,6 @@ take, `validateRegex` is used for _validating_ the input, `regexReplace` _replac * Returns: `[last value should be taken]` - --- **returns an empty value if the regex does not match the first input:** @@ -114,33 +110,24 @@ take, `validateRegex` is used for _validating_ the input, `regexReplace` _replac * Returns: `[]` - - - ## Parameter ### Regex No description -- ID: `regex` -- Datatype: `string` -- Default Value: `None` - - +* ID: `regex` +* Datatype: `string` +* Default Value: `None` ### Negate No description -- ID: `negate` -- Datatype: `boolean` -- Default Value: `false` - - - - +* ID: `negate` +* Datatype: `boolean` +* Default Value: `false` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff 
--git a/docs/build/reference/transformer/Conditional/negateTransformer.md b/docs/build/reference/transformer/Conditional/negateTransformer.md index aa357a594..e09a6f577 100644 --- a/docs/build/reference/transformer/Conditional/negateTransformer.md +++ b/docs/build/reference/transformer/Conditional/negateTransformer.md @@ -8,8 +8,6 @@ tags: # Negate binary (NOT) - - Accepts one input, which is either 'true', '1' or 'false', '0' and negates it. ## Examples @@ -24,7 +22,6 @@ Accepts one input, which is either 'true', '1' or 'false', '0' and negates it. * Returns: `[1, 0, true, false, true, false]` - --- **Example 2:** @@ -34,7 +31,6 @@ Accepts one input, which is either 'true', '1' or 'false', '0' and negates it. * Returns: `[]` * **Throws error:** `IllegalArgumentException` - --- **Example 3:** @@ -44,13 +40,10 @@ Accepts one input, which is either 'true', '1' or 'false', '0' and negates it. * Returns: `[]` * **Throws error:** `IllegalArgumentException` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Conversion/convertCharset.md b/docs/build/reference/transformer/Conversion/convertCharset.md index c4aa0d2d6..22bcf38d9 100644 --- a/docs/build/reference/transformer/Conversion/convertCharset.md +++ b/docs/build/reference/transformer/Conversion/convertCharset.md @@ -8,11 +8,8 @@ tags: # Convert charset - - Convert the string from "sourceCharset" to "targetCharset". 
- ## Parameter ### Source charset @@ -23,8 +20,6 @@ No description - Datatype: `string` - Default Value: `ISO-8859-1` - - ### Target charset No description @@ -33,10 +28,6 @@ No description - Datatype: `string` - Default Value: `UTF-8` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Date/compareDates.md b/docs/build/reference/transformer/Date/compareDates.md index fc9bbf60d..5726eb65e 100644 --- a/docs/build/reference/transformer/Date/compareDates.md +++ b/docs/build/reference/transformer/Date/compareDates.md @@ -8,8 +8,6 @@ tags: # Compare dates - - Compares two dates. Returns 1 if the comparison yields true and 0 otherwise. If there are multiple dates in both sets, the comparator must be true for all dates. @@ -31,7 +29,6 @@ For instance, `[2014-08-02, 2014-08-03]` < `[2014-08-03]` yields 0 as not all da * Returns: `[1]` - --- **Example 2:** @@ -44,7 +41,6 @@ For instance, `[2014-08-02, 2014-08-03]` < `[2014-08-03]` yields 0 as not all da * Returns: `[0]` - --- **Example 3:** @@ -57,7 +53,6 @@ For instance, `[2014-08-02, 2014-08-03]` < `[2014-08-03]` yields 0 as not all da * Returns: `[1]` - --- **Example 4:** @@ -70,7 +65,6 @@ For instance, `[2014-08-02, 2014-08-03]` < `[2014-08-03]` yields 0 as not all da * Returns: `[0]` - --- **Example 5:** @@ -83,7 +77,6 @@ For instance, `[2014-08-02, 2014-08-03]` < `[2014-08-03]` yields 0 as not all da * Returns: `[1]` - --- **Example 6:** @@ -96,23 +89,16 @@ For instance, `[2014-08-02, 2014-08-03]` < `[2014-08-03]` yields 0 as not all da * Returns: `[0]` - - - ## Parameter ### Comparator No description -- ID: `comparator` -- Datatype: `enumeration` -- Default Value: `<` - - - - +* ID: `comparator` +* Datatype: `enumeration` +* Default Value: `<` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Date/currentDate.md b/docs/build/reference/transformer/Date/currentDate.md index 
f199ae24e..30289a646 100644 --- a/docs/build/reference/transformer/Date/currentDate.md +++ b/docs/build/reference/transformer/Date/currentDate.md @@ -8,15 +8,12 @@ tags: # Current date - - Outputs the current date. - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Date/datetoTimestamp.md b/docs/build/reference/transformer/Date/datetoTimestamp.md index 498ffbd33..a87b58050 100644 --- a/docs/build/reference/transformer/Date/datetoTimestamp.md +++ b/docs/build/reference/transformer/Date/datetoTimestamp.md @@ -8,8 +8,6 @@ tags: # Date to timestamp - - Convert an xsd:dateTime to a timestamp. Returns the passed time since the Unix Epoch (1970-01-01). ## Examples @@ -24,7 +22,6 @@ Convert an xsd:dateTime to a timestamp. Returns the passed time since the Unix E * Returns: `[1499117572000]` - --- **Example 2:** @@ -33,7 +30,6 @@ Convert an xsd:dateTime to a timestamp. Returns the passed time since the Unix E * Returns: `[1499113972000]` - --- **Example 3:** @@ -45,7 +41,6 @@ Convert an xsd:dateTime to a timestamp. Returns the passed time since the Unix E * Returns: `[1499113972]` - --- **Example 4:** @@ -54,23 +49,16 @@ Convert an xsd:dateTime to a timestamp. Returns the passed time since the Unix E * Returns: `[1499040000000]` - - - ## Parameter ### Unit No description -- ID: `unit` -- Datatype: `enumeration` -- Default Value: `milliseconds` - - - - +* ID: `unit` +* Datatype: `enumeration` +* Default Value: `milliseconds` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Date/duration.md b/docs/build/reference/transformer/Date/duration.md index 804d37f2c..73f9ac07a 100644 --- a/docs/build/reference/transformer/Date/duration.md +++ b/docs/build/reference/transformer/Date/duration.md @@ -8,15 +8,12 @@ tags: # Duration - - Computes the time difference between two data times. 
- ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Date/durationInDays.md b/docs/build/reference/transformer/Date/durationInDays.md index 9ca73deb5..94f9f4b05 100644 --- a/docs/build/reference/transformer/Date/durationInDays.md +++ b/docs/build/reference/transformer/Date/durationInDays.md @@ -8,15 +8,12 @@ tags: # Duration in days - - Converts an xsd:duration to days. - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Date/durationInSeconds.md b/docs/build/reference/transformer/Date/durationInSeconds.md index 846ee3516..083123fe7 100644 --- a/docs/build/reference/transformer/Date/durationInSeconds.md +++ b/docs/build/reference/transformer/Date/durationInSeconds.md @@ -8,15 +8,12 @@ tags: # Duration in seconds - - Converts an xsd:duration to seconds. - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Date/durationInYears.md b/docs/build/reference/transformer/Date/durationInYears.md index 3bf9a5e5e..a3f0e22cb 100644 --- a/docs/build/reference/transformer/Date/durationInYears.md +++ b/docs/build/reference/transformer/Date/durationInYears.md @@ -8,15 +8,12 @@ tags: # Duration in years - - Converts an xsd:duration to years. - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Date/numberToDuration.md b/docs/build/reference/transformer/Date/numberToDuration.md index bcea640a4..d1d4dffb8 100644 --- a/docs/build/reference/transformer/Date/numberToDuration.md +++ b/docs/build/reference/transformer/Date/numberToDuration.md @@ -8,11 +8,8 @@ tags: # Number to duration - - Converts a number to an xsd:duration. 
- ## Parameter ### Unit @@ -23,10 +20,6 @@ No description - Datatype: `enumeration` - Default Value: `day` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Date/parseDate.md b/docs/build/reference/transformer/Date/parseDate.md index 15c7dc920..f0b368ebd 100644 --- a/docs/build/reference/transformer/Date/parseDate.md +++ b/docs/build/reference/transformer/Date/parseDate.md @@ -8,8 +8,6 @@ tags: # Parse date pattern - - Parses a date based on a specified pattern, returning an xsd:date. ## Examples @@ -27,7 +25,6 @@ Parses a date based on a specified pattern, returning an xsd:date. * Returns: `[2015-04-03]` - --- **Example 2:** @@ -39,7 +36,6 @@ Parses a date based on a specified pattern, returning an xsd:date. * Returns: `[2015-04-03]` - --- **Example 3:** @@ -51,7 +47,6 @@ Parses a date based on a specified pattern, returning an xsd:date. * Returns: `[2015-04-03]` - --- **Example 4:** @@ -64,7 +59,6 @@ Parses a date based on a specified pattern, returning an xsd:date. * Returns: `[2024-05-01]` - --- **Example 5:** @@ -77,7 +71,6 @@ Parses a date based on a specified pattern, returning an xsd:date. * Returns: `[2024-05-01]` - --- **Example 6:** @@ -91,7 +84,6 @@ Parses a date based on a specified pattern, returning an xsd:date. * Returns: `[]` * **Throws error:** `ValidationException` - --- **Example 7:** @@ -105,43 +97,32 @@ Parses a date based on a specified pattern, returning an xsd:date. * Returns: `[]` * **Throws error:** `ValidationException` - - - ## Parameter ### Format The date pattern used to parse the input values -- ID: `format` -- Datatype: `string` -- Default Value: `dd-MM-yyyy` - - +* ID: `format` +* Datatype: `string` +* Default Value: `dd-MM-yyyy` ### Lenient If set to true, the parser tries to use heuristics to parse dates with invalid fields (such as a day of zero). 
-- ID: `lenient` -- Datatype: `boolean` -- Default Value: `false` - - +* ID: `lenient` +* Datatype: `boolean` +* Default Value: `false` ### Locale Optional locale for the date format. If not set the system's locale will be used. -- ID: `locale` -- Datatype: `option[locale]` -- Default Value: `None` - - - - +* ID: `locale` +* Datatype: `option[locale]` +* Default Value: `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Date/timeToDate.md b/docs/build/reference/transformer/Date/timeToDate.md index a06ae0642..6979b9736 100644 --- a/docs/build/reference/transformer/Date/timeToDate.md +++ b/docs/build/reference/transformer/Date/timeToDate.md @@ -8,8 +8,6 @@ tags: # Timestamp to date - - Convert a timestamp to xsd:date format. Expects an integer that denotes the passed time since the Unix Epoch (1970-01-01) ## Examples @@ -24,7 +22,6 @@ Convert a timestamp to xsd:date format. Expects an integer that denotes the pass * Returns: `[2017-07-03T21:32:52Z]` - --- **Example 2:** @@ -36,7 +33,6 @@ Convert a timestamp to xsd:date format. Expects an integer that denotes the pass * Returns: `[2017-07-03]` - --- **Example 3:** @@ -49,33 +45,24 @@ Convert a timestamp to xsd:date format. Expects an integer that denotes the pass * Returns: `[2017-07-03]` - - - ## Parameter ### Format Custom output format (e.g., 'yyyy-MM-dd'). If left empty, a full xsd:dateTime (UTC) is returned. 
-- ID: `format` -- Datatype: `string` -- Default Value: `None` - - +* ID: `format` +* Datatype: `string` +* Default Value: `None` ### Unit No description -- ID: `unit` -- Datatype: `enumeration` -- Default Value: `milliseconds` - - - - +* ID: `unit` +* Datatype: `enumeration` +* Default Value: `milliseconds` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_ABS.md b/docs/build/reference/transformer/Excel/Excel_ABS.md index e551f4857..8d5f4866e 100644 --- a/docs/build/reference/transformer/Excel/Excel_ABS.md +++ b/docs/build/reference/transformer/Excel/Excel_ABS.md @@ -8,8 +8,6 @@ tags: # Abs - - Excel ABS(number): Returns the absolute value of the given number. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `ABS` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_ACOS.md b/docs/build/reference/transformer/Excel/Excel_ACOS.md index a7cc6a0b5..e00b31dfb 100644 --- a/docs/build/reference/transformer/Excel/Excel_ACOS.md +++ b/docs/build/reference/transformer/Excel/Excel_ACOS.md @@ -8,8 +8,6 @@ tags: # Acos - - Excel ACOS(number): Returns the inverse cosine of the given number in radians. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `ACOS` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_ACOSH.md b/docs/build/reference/transformer/Excel/Excel_ACOSH.md index e6f6a9416..27baf6a7f 100644 --- a/docs/build/reference/transformer/Excel/Excel_ACOSH.md +++ b/docs/build/reference/transformer/Excel/Excel_ACOSH.md @@ -8,8 +8,6 @@ tags: # Acosh - - Excel ACOSH(number): Returns the inverse hyperbolic cosine of the given number in radians. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `ACOSH` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_AND.md b/docs/build/reference/transformer/Excel/Excel_AND.md index 869f644d8..fc4af8aae 100644 --- a/docs/build/reference/transformer/Excel/Excel_AND.md +++ b/docs/build/reference/transformer/Excel/Excel_AND.md @@ -8,8 +8,6 @@ tags: # And - - Excel AND(argument1; argument2 ...argument30): Returns TRUE if all the arguments are considered TRUE, and FALSE otherwise. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `AND` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_ASIN.md b/docs/build/reference/transformer/Excel/Excel_ASIN.md index d0b2cd3f8..f5fedabb6 100644 --- a/docs/build/reference/transformer/Excel/Excel_ASIN.md +++ b/docs/build/reference/transformer/Excel/Excel_ASIN.md @@ -8,8 +8,6 @@ tags: # Asin - - Excel ASIN(number): Returns the inverse sine of the given number in radians. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `ASIN` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_ASINH.md b/docs/build/reference/transformer/Excel/Excel_ASINH.md index 5aa2653b3..c277d8098 100644 --- a/docs/build/reference/transformer/Excel/Excel_ASINH.md +++ b/docs/build/reference/transformer/Excel/Excel_ASINH.md @@ -8,8 +8,6 @@ tags: # Asinh - - Excel ASINH(number): Returns the inverse hyperbolic sine of the given number in radians. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `ASINH` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_ATAN.md b/docs/build/reference/transformer/Excel/Excel_ATAN.md index b4852b827..e9b6d267a 100644 --- a/docs/build/reference/transformer/Excel/Excel_ATAN.md +++ b/docs/build/reference/transformer/Excel/Excel_ATAN.md @@ -8,8 +8,6 @@ tags: # Atan - - Excel ATAN(number): Returns the inverse tangent of the given number in radians. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `ATAN` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_ATAN2.md b/docs/build/reference/transformer/Excel/Excel_ATAN2.md index 81215550f..8a2f99db9 100644 --- a/docs/build/reference/transformer/Excel/Excel_ATAN2.md +++ b/docs/build/reference/transformer/Excel/Excel_ATAN2.md @@ -8,8 +8,6 @@ tags: # Atan2 - - Excel ATAN2(number_x; number_y): Returns the inverse tangent of the specified x and y coordinates. Number_x is the value for the x coordinate. Number_y is the value for the y coordinate. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `ATAN2` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_ATANH.md b/docs/build/reference/transformer/Excel/Excel_ATANH.md index 0f807acee..96ec1d6de 100644 --- a/docs/build/reference/transformer/Excel/Excel_ATANH.md +++ b/docs/build/reference/transformer/Excel/Excel_ATANH.md @@ -8,8 +8,6 @@ tags: # Atanh - - Excel ATANH(number): Returns the inverse hyperbolic tangent of the given number. (Angle is returned in radians.) 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `ATANH` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_AVEDEV.md b/docs/build/reference/transformer/Excel/Excel_AVEDEV.md index 27ba796ba..4c0a25b8a 100644 --- a/docs/build/reference/transformer/Excel/Excel_AVEDEV.md +++ b/docs/build/reference/transformer/Excel/Excel_AVEDEV.md @@ -8,8 +8,6 @@ tags: # Avedev - - Excel AVEDEV(number1; number2; ... number_30): Returns the average of the absolute deviations of data points from their mean. Displays the diffusion in a data set. Number_1; number_2; ... number_30 are values or ranges that represent a sample. Each number can also be replaced by a reference. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `AVEDEV` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_AVERAGE.md b/docs/build/reference/transformer/Excel/Excel_AVERAGE.md index 680bb380c..f98b066d4 100644 --- a/docs/build/reference/transformer/Excel/Excel_AVERAGE.md +++ b/docs/build/reference/transformer/Excel/Excel_AVERAGE.md @@ -8,8 +8,6 @@ tags: # Average - - Excel AVERAGE(number_1; number_2; ... number_30): Returns the average of the arguments. Number_1; number_2; ... number_30 are numerical values or ranges. Text is ignored. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `AVERAGE` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_AVERAGEA.md b/docs/build/reference/transformer/Excel/Excel_AVERAGEA.md index d38574e58..992a46a0d 100644 --- a/docs/build/reference/transformer/Excel/Excel_AVERAGEA.md +++ b/docs/build/reference/transformer/Excel/Excel_AVERAGEA.md @@ -8,8 +8,6 @@ tags: # Averagea - - Excel AVERAGEA(value_1; value_2; ... value_30): Returns the average of the arguments. The value of a text is 0. Value_1; value_2; ... value_30 are values or ranges. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `AVERAGEA` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_CEILING.md b/docs/build/reference/transformer/Excel/Excel_CEILING.md index 7b6717a12..e41ed050d 100644 --- a/docs/build/reference/transformer/Excel/Excel_CEILING.md +++ b/docs/build/reference/transformer/Excel/Excel_CEILING.md @@ -8,8 +8,6 @@ tags: # Ceiling - - Excel CEILING(number; significance; mode): Rounds the given number to the nearest integer or multiple of significance. Significance is the value to whose multiple of ten the value is to be rounded up (.01, .1, 1, 10, etc.). Mode is an optional value. If it is indicated and non-zero and if the number and significance are negative, rounding up is carried out based on that value. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `CEILING` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_CHOOSE.md b/docs/build/reference/transformer/Excel/Excel_CHOOSE.md index 41bdddf77..c81e88902 100644 --- a/docs/build/reference/transformer/Excel/Excel_CHOOSE.md +++ b/docs/build/reference/transformer/Excel/Excel_CHOOSE.md @@ -8,8 +8,6 @@ tags: # Choose - - Excel CHOOSE(index; value1; ... value30): Uses an index to return a value from a list of up to 30 values. Index is a reference or number between 1 and 30 indicating which value is to be taken from the list. Value1; ... value30 is the list of values entered as a reference to a cell or as individual values. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `CHOOSE` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_CLEAN.md b/docs/build/reference/transformer/Excel/Excel_CLEAN.md index f94dd7c4b..bd20ef9ad 100644 --- a/docs/build/reference/transformer/Excel/Excel_CLEAN.md +++ b/docs/build/reference/transformer/Excel/Excel_CLEAN.md @@ -8,8 +8,6 @@ tags: # Clean - - Excel CLEAN(text): Removes all non-printing characters from the string. Text refers to the text from which to remove all non-printable characters. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `CLEAN` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_CODE.md b/docs/build/reference/transformer/Excel/Excel_CODE.md index a01209e9a..8fed63f48 100644 --- a/docs/build/reference/transformer/Excel/Excel_CODE.md +++ b/docs/build/reference/transformer/Excel/Excel_CODE.md @@ -8,8 +8,6 @@ tags: # Code - - Excel CODE(text): Returns a numeric code for the first character in a text string. Text is the text for which the code of the first character is to be found. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `CODE` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_COMBIN.md b/docs/build/reference/transformer/Excel/Excel_COMBIN.md index 1ee4b618a..b1f2bbd98 100644 --- a/docs/build/reference/transformer/Excel/Excel_COMBIN.md +++ b/docs/build/reference/transformer/Excel/Excel_COMBIN.md @@ -8,8 +8,6 @@ tags: # Combin - - Excel COMBIN(count_1; count_2): Returns the number of combinations for a given number of objects. Count_1 is the total number of elements. Count_2 is the selected count from the elements. This is the same as the nCr function on a calculator. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `COMBIN` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_CORREL.md b/docs/build/reference/transformer/Excel/Excel_CORREL.md index 9c23fd235..41670c1cf 100644 --- a/docs/build/reference/transformer/Excel/Excel_CORREL.md +++ b/docs/build/reference/transformer/Excel/Excel_CORREL.md @@ -8,8 +8,6 @@ tags: # Correl - - Excel CORREL(data_1; data_2): Returns the correlation coefficient between two data sets. 
Data_1 is the first data set. Data_2 is the second data set. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `CORREL` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_COS.md b/docs/build/reference/transformer/Excel/Excel_COS.md index 48fed0713..8869bbeab 100644 --- a/docs/build/reference/transformer/Excel/Excel_COS.md +++ b/docs/build/reference/transformer/Excel/Excel_COS.md @@ -8,8 +8,6 @@ tags: # Cos - - Excel COS(number): Returns the cosine of the given number (angle in radians). ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `COS` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_COSH.md b/docs/build/reference/transformer/Excel/Excel_COSH.md index 7b71113cc..8799f957d 100644 --- a/docs/build/reference/transformer/Excel/Excel_COSH.md +++ b/docs/build/reference/transformer/Excel/Excel_COSH.md @@ -8,8 +8,6 @@ tags: # Cosh - - Excel COSH(number): Returns the hyperbolic cosine of the given number (angle in radians). ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `COSH` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_COUNT.md b/docs/build/reference/transformer/Excel/Excel_COUNT.md index 43f7ad29b..93c16fd3f 100644 --- a/docs/build/reference/transformer/Excel/Excel_COUNT.md +++ b/docs/build/reference/transformer/Excel/Excel_COUNT.md @@ -8,8 +8,6 @@ tags: # Count - - Excel COUNT(value_1; value_2; ... value_30): Counts how many numbers are in the list of arguments. Text entries are ignored. Value_1; value_2; ... value_30 are values or ranges which are to be counted. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `COUNT` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_COUNTA.md b/docs/build/reference/transformer/Excel/Excel_COUNTA.md index 599ecf982..e6d4aef73 100644 --- a/docs/build/reference/transformer/Excel/Excel_COUNTA.md +++ b/docs/build/reference/transformer/Excel/Excel_COUNTA.md @@ -8,8 +8,6 @@ tags: # Counta - - Excel COUNTA(value_1; value_2; ... value_30): Counts how many values are in the list of arguments. Text entries are also counted, even when they contain an empty string of length 0. If an argument is an array or reference, empty cells within the array or reference are ignored. value_1; value_2; ... value_30 are up to 30 arguments representing the values to be counted. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `COUNTA` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_COVAR.md b/docs/build/reference/transformer/Excel/Excel_COVAR.md index 7246beb22..a8a32ebe5 100644 --- a/docs/build/reference/transformer/Excel/Excel_COVAR.md +++ b/docs/build/reference/transformer/Excel/Excel_COVAR.md @@ -8,8 +8,6 @@ tags: # Covar - - Excel COVAR(data_1; data_2): Returns the covariance of the product of paired deviations. Data_1 is the first data set. Data_2 is the second data set. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `COVAR` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_DEGREES.md b/docs/build/reference/transformer/Excel/Excel_DEGREES.md index 6a991ece0..0e08f6bbc 100644 --- a/docs/build/reference/transformer/Excel/Excel_DEGREES.md +++ b/docs/build/reference/transformer/Excel/Excel_DEGREES.md @@ -8,8 +8,6 @@ tags: # Degrees - - Excel DEGREES(number): Converts the given number in radians to degrees. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `DEGREES` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_DEVSQ.md b/docs/build/reference/transformer/Excel/Excel_DEVSQ.md index 7e5cf6e58..a69a21bd3 100644 --- a/docs/build/reference/transformer/Excel/Excel_DEVSQ.md +++ b/docs/build/reference/transformer/Excel/Excel_DEVSQ.md @@ -8,8 +8,6 @@ tags: # Devsq - - Excel DEVSQ(number_1; number_2; ... number_30): Returns the sum of squares of deviations based on a sample mean. Number_1; number_2; ... number_30 are numerical values or ranges representing a sample. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `DEVSQ` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_EVEN.md b/docs/build/reference/transformer/Excel/Excel_EVEN.md index 788e36c6a..d1cc82fca 100644 --- a/docs/build/reference/transformer/Excel/Excel_EVEN.md +++ b/docs/build/reference/transformer/Excel/Excel_EVEN.md @@ -8,8 +8,6 @@ tags: # Even - - Excel EVEN(number): Rounds the given number up to the nearest even integer. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `EVEN` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_EXACT.md b/docs/build/reference/transformer/Excel/Excel_EXACT.md index fc6e8aaa1..a848a1f52 100644 --- a/docs/build/reference/transformer/Excel/Excel_EXACT.md +++ b/docs/build/reference/transformer/Excel/Excel_EXACT.md @@ -8,8 +8,6 @@ tags: # Exact - - Excel EXACT(text_1; text_2): Compares two text strings and returns TRUE if they are identical. This function is case- sensitive. Text_1 is the first text to compare. Text_2 is the second text to compare. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `EXACT` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_EXP.md b/docs/build/reference/transformer/Excel/Excel_EXP.md index 988988543..250a3920f 100644 --- a/docs/build/reference/transformer/Excel/Excel_EXP.md +++ b/docs/build/reference/transformer/Excel/Excel_EXP.md @@ -8,8 +8,6 @@ tags: # Exp - - Excel EXP(number): Returns e raised to the power of the given number. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `EXP` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_FACT.md b/docs/build/reference/transformer/Excel/Excel_FACT.md index 6bcf0b857..e996a1dc0 100644 --- a/docs/build/reference/transformer/Excel/Excel_FACT.md +++ b/docs/build/reference/transformer/Excel/Excel_FACT.md @@ -8,8 +8,6 @@ tags: # Fact - - Excel FACT(number): Returns the factorial of the given number. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `FACT` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_FALSE.md b/docs/build/reference/transformer/Excel/Excel_FALSE.md index 8df870cf7..94dc0e67e 100644 --- a/docs/build/reference/transformer/Excel/Excel_FALSE.md +++ b/docs/build/reference/transformer/Excel/Excel_FALSE.md @@ -8,8 +8,6 @@ tags: # False - - Excel FALSE(): Set the logical value to FALSE. The FALSE() function does not require any arguments. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `FALSE` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_FIND.md b/docs/build/reference/transformer/Excel/Excel_FIND.md index 97cbb6634..f21cba0c2 100644 --- a/docs/build/reference/transformer/Excel/Excel_FIND.md +++ b/docs/build/reference/transformer/Excel/Excel_FIND.md @@ -8,8 +8,6 @@ tags: # Find - - Excel FIND(find_text; text; position): Looks for a string of text within another string. Where to begin the search can also be defined. The search term can be a number or any string of characters. The search is case-sensitive. Find_text is the text to be found. Text is the text where the search takes place. Position (optional) is the position in the text from which the search starts. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `FIND` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_FLOOR.md b/docs/build/reference/transformer/Excel/Excel_FLOOR.md index 1940904f6..cdd2f1714 100644 --- a/docs/build/reference/transformer/Excel/Excel_FLOOR.md +++ b/docs/build/reference/transformer/Excel/Excel_FLOOR.md @@ -8,8 +8,6 @@ tags: # Floor - - Excel FLOOR(number; significance; mode): Rounds the given number down to the nearest multiple of significance. Significance is the value to whose multiple of ten the number is to be rounded down (.01, .1, 1, 10, etc.). Mode is an optional value. If it is indicated and non-zero and if the number and significance are negative, rounding up is carried out based on that value. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `FLOOR` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_FORECAST.md b/docs/build/reference/transformer/Excel/Excel_FORECAST.md index 518d0a848..afd080666 100644 --- a/docs/build/reference/transformer/Excel/Excel_FORECAST.md +++ b/docs/build/reference/transformer/Excel/Excel_FORECAST.md @@ -8,8 +8,6 @@ tags: # Forecast - - Excel FORECAST(value; data_Y; data_X): Extrapolates future values based on existing x and y values. Value is the x value, for which the y value of the linear regression is to be returned. Data_Y is the array or range of known y's. Data_X is the array or range of known x's. Does not work for exponential functions. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `FORECAST` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_FV.md b/docs/build/reference/transformer/Excel/Excel_FV.md index 1be99e2f2..587b1d629 100644 --- a/docs/build/reference/transformer/Excel/Excel_FV.md +++ b/docs/build/reference/transformer/Excel/Excel_FV.md @@ -8,8 +8,6 @@ tags: # Fv - - Excel FV(rate; NPER; PMT; PV; type): Returns the future value of an investment based on periodic, constant payments and a constant interest rate. Rate is the periodic interest rate. NPER is the total number of periods. PMT is the annuity paid regularly per period. PV (optional) is the present cash value of an investment. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `FV` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_GEOMEAN.md b/docs/build/reference/transformer/Excel/Excel_GEOMEAN.md index 72d1cb6f9..d4f4d519c 100644 --- a/docs/build/reference/transformer/Excel/Excel_GEOMEAN.md +++ b/docs/build/reference/transformer/Excel/Excel_GEOMEAN.md @@ -8,8 +8,6 @@ tags: # Geomean - - Excel GEOMEAN(number_1; number_2; ... number_30): Returns the geometric mean of a sample. Number_1; number_2; ... number_30 are numerical arguments or ranges that represent a random sample. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `GEOMEAN` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_IF.md b/docs/build/reference/transformer/Excel/Excel_IF.md index 2654c3d65..c0bcafc53 100644 --- a/docs/build/reference/transformer/Excel/Excel_IF.md +++ b/docs/build/reference/transformer/Excel/Excel_IF.md @@ -8,8 +8,6 @@ tags: # If - - Excel IF(test; then_value; otherwise_value): Returns different values based on the test value. Note that in this implementation it will not actually evaluate logical conditions. Then_value is the value that is returned if the test is TRUE. Otherwise_value (optional) is the value that is returned if the test is FALSE. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `IF` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_INT.md b/docs/build/reference/transformer/Excel/Excel_INT.md index 5ad7f8708..edd3305bf 100644 --- a/docs/build/reference/transformer/Excel/Excel_INT.md +++ b/docs/build/reference/transformer/Excel/Excel_INT.md @@ -8,8 +8,6 @@ tags: # Int - - Excel INT(number): Rounds the given number down to the nearest integer. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `INT` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_INTERCEPT.md b/docs/build/reference/transformer/Excel/Excel_INTERCEPT.md index fa4d859f4..fe393abc3 100644 --- a/docs/build/reference/transformer/Excel/Excel_INTERCEPT.md +++ b/docs/build/reference/transformer/Excel/Excel_INTERCEPT.md @@ -8,8 +8,6 @@ tags: # Intercept - - Excel INTERCEPT(data_Y; data_X): Calculates the y-value at which a line will intersect the y-axis by using known x-values and y-values. Data_Y is the dependent set of observations or data. Data_X is the independent set of observations or data. Names, arrays or references containing numbers must be used here. Numbers can also be entered directly. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `INTERCEPT` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_IPMT.md b/docs/build/reference/transformer/Excel/Excel_IPMT.md index a09184aaa..3a8d4acb1 100644 --- a/docs/build/reference/transformer/Excel/Excel_IPMT.md +++ b/docs/build/reference/transformer/Excel/Excel_IPMT.md @@ -8,8 +8,6 @@ tags: # Ipmt - - Excel IPMT(rate; period; NPER; PV; FV; type): Calculates the periodic amortization for an investment with regular payments and a constant interest rate. Rate is the periodic interest rate. Period is the period for which the compound interest is calculated. NPER is the total number of periods during which annuity is paid. Period=NPER, if compound interest for the last period is calculated. PV is the present cash value in sequence of payments. FV (optional) is the desired value (future value) at the end of the periods. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `IPMT` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_IRR.md b/docs/build/reference/transformer/Excel/Excel_IRR.md index 090e35c36..9110daefc 100644 --- a/docs/build/reference/transformer/Excel/Excel_IRR.md +++ b/docs/build/reference/transformer/Excel/Excel_IRR.md @@ -8,8 +8,6 @@ tags: # Irr - - Excel IRR(values; guess): Calculates the internal rate of return for an investment. The values represent cash flow values at regular intervals; at least one value must be negative (payments), and at least one value must be positive (income). Values is an array containing the values. Guess (optional) is the estimated value. If you can provide only a few values, you should provide an initial guess to enable the iteration. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `IRR` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_LARGE.md b/docs/build/reference/transformer/Excel/Excel_LARGE.md index 3f3f3c4a7..116689722 100644 --- a/docs/build/reference/transformer/Excel/Excel_LARGE.md +++ b/docs/build/reference/transformer/Excel/Excel_LARGE.md @@ -8,8 +8,6 @@ tags: # Large - - Excel LARGE(data; rank_c): Returns the Rank_c-th largest value in a data set. Data is the cell range of data. Rank_c is the ranking of the value (2nd largest, 3rd largest, etc.) written as an integer. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `LARGE` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_LEFT.md b/docs/build/reference/transformer/Excel/Excel_LEFT.md index d32e50ffc..c34e1341b 100644 --- a/docs/build/reference/transformer/Excel/Excel_LEFT.md +++ b/docs/build/reference/transformer/Excel/Excel_LEFT.md @@ -8,8 +8,6 @@ tags: # Left - - Excel LEFT(text; number): Returns the first character or characters in a text string. Text is the text where the initial partial words are to be determined. Number (optional) is the number of characters for the start text. If this parameter is not defined, one character is returned. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `LEFT` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_LN.md b/docs/build/reference/transformer/Excel/Excel_LN.md index 69f6959cd..08bb7f263 100644 --- a/docs/build/reference/transformer/Excel/Excel_LN.md +++ b/docs/build/reference/transformer/Excel/Excel_LN.md @@ -8,8 +8,6 @@ tags: # Ln - - Excel LN(number): Returns the natural logarithm based on the constant e of the given number. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `LN` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_LOG.md b/docs/build/reference/transformer/Excel/Excel_LOG.md index 5c4c37b81..41c7ffc83 100644 --- a/docs/build/reference/transformer/Excel/Excel_LOG.md +++ b/docs/build/reference/transformer/Excel/Excel_LOG.md @@ -8,8 +8,6 @@ tags: # Log - - Excel LOG(number; base): Returns the logarithm of the given number to the specified base. Base is the base for the logarithm calculation. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `LOG` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_LOG10.md b/docs/build/reference/transformer/Excel/Excel_LOG10.md index 844c0cbe2..b97a0b93c 100644 --- a/docs/build/reference/transformer/Excel/Excel_LOG10.md +++ b/docs/build/reference/transformer/Excel/Excel_LOG10.md @@ -8,8 +8,6 @@ tags: # Log10 - - Excel LOG10(number): Returns the base-10 logarithm of the given number. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `LOG10` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_MAX.md b/docs/build/reference/transformer/Excel/Excel_MAX.md index db8f0852b..27b24d07b 100644 --- a/docs/build/reference/transformer/Excel/Excel_MAX.md +++ b/docs/build/reference/transformer/Excel/Excel_MAX.md @@ -8,8 +8,6 @@ tags: # Max - - Excel MAX(number_1; number_2; ... number_30): Returns the maximum value in a list of arguments. Number_1; number_2; ... number_30 are numerical values or ranges. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `MAX` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_MAXA.md b/docs/build/reference/transformer/Excel/Excel_MAXA.md index 6c18e0afb..e60c43021 100644 --- a/docs/build/reference/transformer/Excel/Excel_MAXA.md +++ b/docs/build/reference/transformer/Excel/Excel_MAXA.md @@ -8,8 +8,6 @@ tags: # Maxa - - Excel MAXA(value_1; value_2; ... value_30): Returns the maximum value in a list of arguments. Unlike MAX, text can be entered. The value of the text is 0. Value_1; value_2; ... value_30 are values or ranges. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `MAXA` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_MEDIAN.md b/docs/build/reference/transformer/Excel/Excel_MEDIAN.md index 78c96327f..bc09ef899 100644 --- a/docs/build/reference/transformer/Excel/Excel_MEDIAN.md +++ b/docs/build/reference/transformer/Excel/Excel_MEDIAN.md @@ -8,8 +8,6 @@ tags: # Median - - Excel MEDIAN(number_1; number_2; ... number_30): Returns the median of a set of numbers. Number_1; number_2; ... number_30 are values or ranges, which represent a sample. Each number can also be replaced by a reference. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `MEDIAN` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_MID.md b/docs/build/reference/transformer/Excel/Excel_MID.md index 9f8d8764b..b08f492e2 100644 --- a/docs/build/reference/transformer/Excel/Excel_MID.md +++ b/docs/build/reference/transformer/Excel/Excel_MID.md @@ -8,8 +8,6 @@ tags: # Mid - - Excel MID(text; start; number): Returns a text segment of a character string. The parameters specify the starting position and the number of characters. Text is the text containing the characters to extract. Start is the position of the first character in the text to extract. Number is the number of characters in the part of the text. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `MID` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_MIN.md b/docs/build/reference/transformer/Excel/Excel_MIN.md index 5193a07b0..1c0787d5e 100644 --- a/docs/build/reference/transformer/Excel/Excel_MIN.md +++ b/docs/build/reference/transformer/Excel/Excel_MIN.md @@ -8,8 +8,6 @@ tags: # Min - - Excel MIN(number_1; number_2; ... number_30): Returns the minimum value in a list of arguments. Number_1; number_2; ... number_30 are numerical values or ranges. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `MIN` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_MINA.md b/docs/build/reference/transformer/Excel/Excel_MINA.md index 8c8c07bbe..013dbb3df 100644 --- a/docs/build/reference/transformer/Excel/Excel_MINA.md +++ b/docs/build/reference/transformer/Excel/Excel_MINA.md @@ -8,8 +8,6 @@ tags: # Mina - - Excel MINA(value_1; value_2; ... value_30): Returns the minimum value in a list of arguments. Here text can also be entered. The value of the text is 0. Value_1; value_2; ... value_30 are values or ranges. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `MINA` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_MIRR.md b/docs/build/reference/transformer/Excel/Excel_MIRR.md index 39adb9152..8f3079816 100644 --- a/docs/build/reference/transformer/Excel/Excel_MIRR.md +++ b/docs/build/reference/transformer/Excel/Excel_MIRR.md @@ -8,8 +8,6 @@ tags: # Mirr - - Excel MIRR(values; investment; reinvest_rate): Calculates the modified internal rate of return of a series of investments. 
Values corresponds to the array or the cell reference for cells whose content corresponds to the payments. Investment is the rate of interest of the investments (the negative values of the array) Reinvest_rate is the rate of interest of the reinvestment (the positive values of the array). ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `MIRR` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_MOD.md b/docs/build/reference/transformer/Excel/Excel_MOD.md index 3c6fe204b..d5cb2b096 100644 --- a/docs/build/reference/transformer/Excel/Excel_MOD.md +++ b/docs/build/reference/transformer/Excel/Excel_MOD.md @@ -8,8 +8,6 @@ tags: # Mod - - Excel MOD(dividend; divisor): Returns the remainder after a number is divided by a divisor. Dividend is the number which will be divided by the divisor. Divisor is the number by which to divide the dividend. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `MOD` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_MODE.md b/docs/build/reference/transformer/Excel/Excel_MODE.md index 3363d2dc7..1f3a81826 100644 --- a/docs/build/reference/transformer/Excel/Excel_MODE.md +++ b/docs/build/reference/transformer/Excel/Excel_MODE.md @@ -8,8 +8,6 @@ tags: # Mode - - Excel MODE(number_1; number_2; ... number_30): Returns the most common value in a data set. Number_1; number_2; ... number_30 are numerical values or ranges. If several values have the same frequency, it returns the smallest value. An error occurs when a value does not appear twice. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `MODE` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_NORMDIST.md b/docs/build/reference/transformer/Excel/Excel_NORMDIST.md index 2fcbf79eb..92afdc17b 100644 --- a/docs/build/reference/transformer/Excel/Excel_NORMDIST.md +++ b/docs/build/reference/transformer/Excel/Excel_NORMDIST.md @@ -8,8 +8,6 @@ tags: # Normdist - - Excel NORMDIST(number; mean; STDEV; C): Returns the normal distribution for the given Number in the distribution. Mean is the mean value of the distribution. STDEV is the standard deviation of the distribution. C = 0 calculates the density function, and C = 1 calculates the distribution. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `NORMDIST` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_NORMINV.md b/docs/build/reference/transformer/Excel/Excel_NORMINV.md index d8cdb2837..b101d7b1b 100644 --- a/docs/build/reference/transformer/Excel/Excel_NORMINV.md +++ b/docs/build/reference/transformer/Excel/Excel_NORMINV.md @@ -8,8 +8,6 @@ tags: # Norminv - - Excel NORMINV(number; mean; STDEV): Returns the inverse of the normal distribution for the given Number in the distribution. Mean is the mean value in the normal distribution. STDEV is the standard deviation of the normal distribution. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `NORMINV` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_NORMSDIST.md b/docs/build/reference/transformer/Excel/Excel_NORMSDIST.md index 4df1241c8..b6144fd26 100644 --- a/docs/build/reference/transformer/Excel/Excel_NORMSDIST.md +++ b/docs/build/reference/transformer/Excel/Excel_NORMSDIST.md @@ -8,8 +8,6 @@ tags: # Normsdist - - Excel NORMSDIST(number): Returns the standard normal cumulative distribution for the given Number. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `NORMSDIST` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_NORMSINV.md b/docs/build/reference/transformer/Excel/Excel_NORMSINV.md index d23dcca31..918bd4908 100644 --- a/docs/build/reference/transformer/Excel/Excel_NORMSINV.md +++ b/docs/build/reference/transformer/Excel/Excel_NORMSINV.md @@ -8,8 +8,6 @@ tags: # Normsinv - - Excel NORMSINV(number): Returns the inverse of the standard normal distribution for the given Number, a probability value. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `NORMSINV` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_NOT.md b/docs/build/reference/transformer/Excel/Excel_NOT.md index c9befb756..7cd71484b 100644 --- a/docs/build/reference/transformer/Excel/Excel_NOT.md +++ b/docs/build/reference/transformer/Excel/Excel_NOT.md @@ -8,8 +8,6 @@ tags: # Not - - Excel NOT(logical_value): Reverses the logical value. Logical_value is any value to be reversed. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `NOT` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_NPER.md b/docs/build/reference/transformer/Excel/Excel_NPER.md index 0a44af657..e664f3c68 100644 --- a/docs/build/reference/transformer/Excel/Excel_NPER.md +++ b/docs/build/reference/transformer/Excel/Excel_NPER.md @@ -8,8 +8,6 @@ tags: # Nper - - Excel NPER(rate; PMT; PV; FV; type): Returns the number of periods for an investment based on periodic, constant payments and a constant interest rate. Rate is the periodic interest rate. PMT is the constant annuity paid in each period. PV is the present value (cash value) in a sequence of payments. FV (optional) is the future value, which is reached at the end of the last period. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `NPER` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_NPV.md b/docs/build/reference/transformer/Excel/Excel_NPV.md index 086092486..8621f8e3f 100644 --- a/docs/build/reference/transformer/Excel/Excel_NPV.md +++ b/docs/build/reference/transformer/Excel/Excel_NPV.md @@ -8,8 +8,6 @@ tags: # Npv - - Excel NPV(Rate; value_1; value_2; ... value_30): Returns the net present value of an investment based on a series of periodic cash flows and a discount rate. Rate is the discount rate for a period. Value_1; value_2;... value_30 are values representing deposits or withdrawals. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `NPV` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_ODD.md b/docs/build/reference/transformer/Excel/Excel_ODD.md index 9080f4153..676b155f2 100644 --- a/docs/build/reference/transformer/Excel/Excel_ODD.md +++ b/docs/build/reference/transformer/Excel/Excel_ODD.md @@ -8,8 +8,6 @@ tags: # Odd - - Excel ODD(number): Rounds the given number up to the nearest odd integer. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `ODD` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_OR.md b/docs/build/reference/transformer/Excel/Excel_OR.md index 12079437a..a30102225 100644 --- a/docs/build/reference/transformer/Excel/Excel_OR.md +++ b/docs/build/reference/transformer/Excel/Excel_OR.md @@ -8,8 +8,6 @@ tags: # Or - - Excel OR(logical_value_1; logical_value_2; ...logical_value_30): Returns TRUE if at least one argument is TRUE. Returns the value FALSE if all the arguments have the logical value FALSE. Logical_value_1; logical_value_2; ...logical_value_30 are conditions to be checked. All conditions can be either TRUE or FALSE. If a range is entered as a parameter, the function uses the value from the range that is in the current column or row. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `OR` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_PEARSON.md b/docs/build/reference/transformer/Excel/Excel_PEARSON.md index 5ce586e68..a052bbf74 100644 --- a/docs/build/reference/transformer/Excel/Excel_PEARSON.md +++ b/docs/build/reference/transformer/Excel/Excel_PEARSON.md @@ -8,8 +8,6 @@ tags: # Pearson - - Excel PEARSON(data_1; data_2): Returns the Pearson product moment correlation coefficient r. Data_1 is the array of the first data set. Data_2 is the array of the second data set. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `PEARSON` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_PERCENTILE.md b/docs/build/reference/transformer/Excel/Excel_PERCENTILE.md index 279c6bc2b..e1b8357b6 100644 --- a/docs/build/reference/transformer/Excel/Excel_PERCENTILE.md +++ b/docs/build/reference/transformer/Excel/Excel_PERCENTILE.md @@ -8,8 +8,6 @@ tags: # Percentile - - Excel PERCENTILE(data; alpha): Returns the alpha-percentile of data values in an array. Data is the array of data. Alpha is the percentage of the scale between 0 and 1. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `PERCENTILE` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_PERCENTRANK.md b/docs/build/reference/transformer/Excel/Excel_PERCENTRANK.md index ebf7be195..813d91465 100644 --- a/docs/build/reference/transformer/Excel/Excel_PERCENTRANK.md +++ b/docs/build/reference/transformer/Excel/Excel_PERCENTRANK.md @@ -8,8 +8,6 @@ tags: # Percentrank - - Excel PERCENTRANK(data; value): Returns the percentage rank (percentile) of the given value in a sample. Data is the array of data in the sample. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `PERCENTRANK` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_PI.md b/docs/build/reference/transformer/Excel/Excel_PI.md index 5e5010679..fbfb513d1 100644 --- a/docs/build/reference/transformer/Excel/Excel_PI.md +++ b/docs/build/reference/transformer/Excel/Excel_PI.md @@ -8,8 +8,6 @@ tags: # Pi - - Excel PI(): Returns the value of PI to fourteen decimal places. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `PI` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_PMT.md b/docs/build/reference/transformer/Excel/Excel_PMT.md index 9fd91e49b..87bac964f 100644 --- a/docs/build/reference/transformer/Excel/Excel_PMT.md +++ b/docs/build/reference/transformer/Excel/Excel_PMT.md @@ -8,8 +8,6 @@ tags: # Pmt - - Excel PMT(rate; NPER; PV; FV; type): Returns the periodic payment for an annuity with constant interest rates. Rate is the periodic interest rate. NPER is the number of periods in which annuity is paid. PV is the present value (cash value) in a sequence of payments. 
FV (optional) is the desired value (future value) to be reached at the end of the periodic payments. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `PMT` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_POISSON.md b/docs/build/reference/transformer/Excel/Excel_POISSON.md index 896951313..5f86ae3d3 100644 --- a/docs/build/reference/transformer/Excel/Excel_POISSON.md +++ b/docs/build/reference/transformer/Excel/Excel_POISSON.md @@ -8,8 +8,6 @@ tags: # Poisson - - Excel POISSON(number; mean; C): Returns the Poisson distribution for the given Number. Mean is the middle value of the Poisson distribution. C = 0 calculates the density function, and C = 1 calculates the distribution. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `POISSON` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_POWER.md b/docs/build/reference/transformer/Excel/Excel_POWER.md index 2a8ab348b..8ef7c6427 100644 --- a/docs/build/reference/transformer/Excel/Excel_POWER.md +++ b/docs/build/reference/transformer/Excel/Excel_POWER.md @@ -8,8 +8,6 @@ tags: # Power - - Excel POWER(base; power): Returns the result of a number raised to a power. Base is the number that is to be raised to the given power. Power is the exponent by which the base is to be raised. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `POWER` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_PPMT.md b/docs/build/reference/transformer/Excel/Excel_PPMT.md index 9dcbf59aa..c701740ea 100644 --- a/docs/build/reference/transformer/Excel/Excel_PPMT.md +++ b/docs/build/reference/transformer/Excel/Excel_PPMT.md @@ -8,8 +8,6 @@ tags: # Ppmt - - Excel PPMT(rate; period; NPER; PV; FV; type): Returns for a given period the payment on the principal for an investment that is based on periodic and constant payments and a constant interest rate. Rate is the periodic interest rate. Period is the amortization period. NPER is the total number of periods during which annuity is paid. PV is the present value in the sequence of payments. FV (optional) is the desired (future) value. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `PPMT` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_PRODUCT.md b/docs/build/reference/transformer/Excel/Excel_PRODUCT.md index a0a2a32a8..d436a504c 100644 --- a/docs/build/reference/transformer/Excel/Excel_PRODUCT.md +++ b/docs/build/reference/transformer/Excel/Excel_PRODUCT.md @@ -8,8 +8,6 @@ tags: # Product - - Excel PRODUCT(number 1 to 30): Multiplies all the numbers given as arguments and returns the product. Number 1 to number 30 are up to 30 arguments whose product is to be calculated, separated by semi-colons. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `PRODUCT` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_PROPER.md b/docs/build/reference/transformer/Excel/Excel_PROPER.md index 473236ce9..97116e85a 100644 --- a/docs/build/reference/transformer/Excel/Excel_PROPER.md +++ b/docs/build/reference/transformer/Excel/Excel_PROPER.md @@ -8,8 +8,6 @@ tags: # Proper - - Excel PROPER(text): Capitalizes the first letter in all words of a text string. Text is the text to be converted. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `PROPER` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_PV.md b/docs/build/reference/transformer/Excel/Excel_PV.md index 542135268..fb6897c56 100644 --- a/docs/build/reference/transformer/Excel/Excel_PV.md +++ b/docs/build/reference/transformer/Excel/Excel_PV.md @@ -8,8 +8,6 @@ tags: # Pv - - Excel PV(rate; NPER; PMT; FV; type): Returns the present value of an investment resulting from a series of regular payments. Rate defines the interest rate per period. NPER is the total number of payment periods. PMT is the regular payment made per period. FV (optional) defines the future value remaining after the final installment has been made. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `PV` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_RADIANS.md b/docs/build/reference/transformer/Excel/Excel_RADIANS.md index 1dea5c36b..7b7027b1c 100644 --- a/docs/build/reference/transformer/Excel/Excel_RADIANS.md +++ b/docs/build/reference/transformer/Excel/Excel_RADIANS.md @@ -8,8 +8,6 @@ tags: # Radians - - Excel RADIANS(number): Converts the given number in degrees to radians. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `RADIANS` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_RAND.md b/docs/build/reference/transformer/Excel/Excel_RAND.md index 770a83a49..e43196452 100644 --- a/docs/build/reference/transformer/Excel/Excel_RAND.md +++ b/docs/build/reference/transformer/Excel/Excel_RAND.md @@ -8,8 +8,6 @@ tags: # Rand - - Excel RAND(): Returns a random number between 0 and 1. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `RAND` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_RANK.md b/docs/build/reference/transformer/Excel/Excel_RANK.md index dfcbd839d..71e227cba 100644 --- a/docs/build/reference/transformer/Excel/Excel_RANK.md +++ b/docs/build/reference/transformer/Excel/Excel_RANK.md @@ -8,8 +8,6 @@ tags: # Rank - - Excel RANK(value; data; type): Returns the rank of the given Value in a sample. Data is the array or range of data in the sample. Type (optional) is the sequence order, either ascending (0) or descending (1). 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `RANK` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_RATE.md b/docs/build/reference/transformer/Excel/Excel_RATE.md index a6c3b9949..895a2087d 100644 --- a/docs/build/reference/transformer/Excel/Excel_RATE.md +++ b/docs/build/reference/transformer/Excel/Excel_RATE.md @@ -8,8 +8,6 @@ tags: # Rate - - Excel RATE(NPER; PMT; PV; FV; type; guess): Returns the constant interest rate per period of an annuity. NPER is the total number of periods, during which payments are made (payment period). PMT is the constant payment (annuity) paid during each period. PV is the cash value in the sequence of payments. FV (optional) is the future value, which is reached at the end of the periodic payments. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period. Guess (optional) determines the estimated value of the interest with iterative calculation. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `RATE` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_REPLACE.md b/docs/build/reference/transformer/Excel/Excel_REPLACE.md index 1b78f81fc..14a76dcb2 100644 --- a/docs/build/reference/transformer/Excel/Excel_REPLACE.md +++ b/docs/build/reference/transformer/Excel/Excel_REPLACE.md @@ -8,8 +8,6 @@ tags: # Replace - - Excel REPLACE(text; position; length; new_text): Replaces part of a text string with a different text string. This function can be used to replace both characters and numbers (which are automatically converted to text). The result of the function is always displayed as text. To perform further calculations with a number which has been replaced by text, convert it back to a number using the VALUE function. 
Any text containing numbers must be enclosed in quotation marks so it is not interpreted as a number and automatically converted to text. Text is text of which a part will be replaced. Position is the position within the text where the replacement will begin. Length is the number of characters in text to be replaced. New_text is the text which replaces text.. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `REPLACE` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_REPT.md b/docs/build/reference/transformer/Excel/Excel_REPT.md index 360c81d64..196c35efe 100644 --- a/docs/build/reference/transformer/Excel/Excel_REPT.md +++ b/docs/build/reference/transformer/Excel/Excel_REPT.md @@ -8,8 +8,6 @@ tags: # Rept - - Excel REPT(text; number): Repeats a character string by the given number of copies. Text is the text to be repeated. Number is the number of repetitions. The result can be a maximum of 255 characters. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `REPT` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_RIGHT.md b/docs/build/reference/transformer/Excel/Excel_RIGHT.md index 8ad63b8e8..7a0692bd6 100644 --- a/docs/build/reference/transformer/Excel/Excel_RIGHT.md +++ b/docs/build/reference/transformer/Excel/Excel_RIGHT.md @@ -8,8 +8,6 @@ tags: # Right - - Excel RIGHT(text; number): Defines the last character or characters in a text string. Text is the text of which the right part is to be determined. Number (optional) is the number of characters from the right part of the text. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `RIGHT` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_ROMAN.md b/docs/build/reference/transformer/Excel/Excel_ROMAN.md index 03e4e6afc..69bf9870b 100644 --- a/docs/build/reference/transformer/Excel/Excel_ROMAN.md +++ b/docs/build/reference/transformer/Excel/Excel_ROMAN.md @@ -8,8 +8,6 @@ tags: # Roman - - Excel ROMAN(number; mode): Converts a number into a Roman numeral. The value range must be between 0 and 3999; the modes can be integers from 0 to 4. Number is the number that is to be converted into a Roman numeral. Mode (optional) indicates the degree of simplification. The higher the value, the greater is the simplification of the Roman numeral. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `ROMAN` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_ROUND.md b/docs/build/reference/transformer/Excel/Excel_ROUND.md index 1b5697314..5a6805c0d 100644 --- a/docs/build/reference/transformer/Excel/Excel_ROUND.md +++ b/docs/build/reference/transformer/Excel/Excel_ROUND.md @@ -8,8 +8,6 @@ tags: # Round - - Excel ROUND(number; count): Rounds the given number to a certain number of decimal places according to valid mathematical criteria. Count (optional) is the number of the places to which the value is to be rounded. If the count parameter is negative, only the whole number portion is rounded. It is rounded to the place indicated by the count. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `ROUND` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_ROUNDDOWN.md b/docs/build/reference/transformer/Excel/Excel_ROUNDDOWN.md index c6fe8a733..6eadbafde 100644 --- a/docs/build/reference/transformer/Excel/Excel_ROUNDDOWN.md +++ b/docs/build/reference/transformer/Excel/Excel_ROUNDDOWN.md @@ -8,8 +8,6 @@ tags: # Rounddown - - Excel ROUNDDOWN(number; count): Rounds the given number. Count (optional) is the number of digits to be rounded down to. If the count parameter is negative, only the whole number portion is rounded. It is rounded to the place indicated by the count. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `ROUNDDOWN` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_ROUNDUP.md b/docs/build/reference/transformer/Excel/Excel_ROUNDUP.md index b31f32791..64fadeb7e 100644 --- a/docs/build/reference/transformer/Excel/Excel_ROUNDUP.md +++ b/docs/build/reference/transformer/Excel/Excel_ROUNDUP.md @@ -8,8 +8,6 @@ tags: # Roundup - - Excel ROUNDUP(number; count): Rounds the given number up. Count (optional) is the number of digits to which rounding up is to be done. If the count parameter is negative, only the whole number portion is rounded. It is rounded to the place indicated by the count. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `ROUNDUP` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_SEARCH.md b/docs/build/reference/transformer/Excel/Excel_SEARCH.md index 609a6f127..0af1917fe 100644 --- a/docs/build/reference/transformer/Excel/Excel_SEARCH.md +++ b/docs/build/reference/transformer/Excel/Excel_SEARCH.md @@ -8,8 +8,6 @@ tags: # Search - - Excel SEARCH(find_text; text; position): Returns the position of a text segment within a character string. The start of the search can be set as an option. The search text can be a number or any sequence of characters. The search is not case-sensitive. The search supports regular expressions. Find_text is the text to be searched for. Text is the text where the search will take place. Position (optional) is the position in the text where the search is to start. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `SEARCH` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_SIGN.md b/docs/build/reference/transformer/Excel/Excel_SIGN.md index 9332ef7c8..2f384e149 100644 --- a/docs/build/reference/transformer/Excel/Excel_SIGN.md +++ b/docs/build/reference/transformer/Excel/Excel_SIGN.md @@ -8,8 +8,6 @@ tags: # Sign - - Excel SIGN(number): Returns the sign of the given number. The function returns the result 1 for a positive sign, -1 for a negative sign, and 0 for zero. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `SIGN` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_SIN.md b/docs/build/reference/transformer/Excel/Excel_SIN.md index 00b046df1..3084ebc1e 100644 --- a/docs/build/reference/transformer/Excel/Excel_SIN.md +++ b/docs/build/reference/transformer/Excel/Excel_SIN.md @@ -8,8 +8,6 @@ tags: # Sin - - Excel SIN(number): Returns the sine of the given number (angle in radians). ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `SIN` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_SINH.md b/docs/build/reference/transformer/Excel/Excel_SINH.md index a3f12eaf2..a9cc4c5d8 100644 --- a/docs/build/reference/transformer/Excel/Excel_SINH.md +++ b/docs/build/reference/transformer/Excel/Excel_SINH.md @@ -8,8 +8,6 @@ tags: # Sinh - - Excel SINH(number): Returns the hyperbolic sine of the given number (angle in radians). ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `SINH` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_SLOPE.md b/docs/build/reference/transformer/Excel/Excel_SLOPE.md index d9b3b95f1..eeba32125 100644 --- a/docs/build/reference/transformer/Excel/Excel_SLOPE.md +++ b/docs/build/reference/transformer/Excel/Excel_SLOPE.md @@ -8,8 +8,6 @@ tags: # Slope - - Excel SLOPE(data_Y; data_X): Returns the slope of the linear regression line. Data_Y is the array or matrix of Y data. Data_X is the array or matrix of X data. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `SLOPE` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_SMALL.md b/docs/build/reference/transformer/Excel/Excel_SMALL.md index 9635e018b..1fb0ef13f 100644 --- a/docs/build/reference/transformer/Excel/Excel_SMALL.md +++ b/docs/build/reference/transformer/Excel/Excel_SMALL.md @@ -8,8 +8,6 @@ tags: # Small - - Excel SMALL(data; rank_c): Returns the Rank_c-th smallest value in a data set. Data is the cell range of data. Rank_c is the rank of the value (2nd smallest, 3rd smallest, etc.) written as an integer. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `SMALL` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_SQRT.md b/docs/build/reference/transformer/Excel/Excel_SQRT.md index ecf893ac6..0e2035978 100644 --- a/docs/build/reference/transformer/Excel/Excel_SQRT.md +++ b/docs/build/reference/transformer/Excel/Excel_SQRT.md @@ -8,8 +8,6 @@ tags: # Sqrt - - Excel SQRT(number): Returns the positive square root of the given number. The value of the number must be positive. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `SQRT` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_STANDARDIZE.md b/docs/build/reference/transformer/Excel/Excel_STANDARDIZE.md index 6bc3e1233..3bbc29030 100644 --- a/docs/build/reference/transformer/Excel/Excel_STANDARDIZE.md +++ b/docs/build/reference/transformer/Excel/Excel_STANDARDIZE.md @@ -8,8 +8,6 @@ tags: # Standardize - - Excel STANDARDIZE(number; mean; STDEV): Converts a random variable to a normalized value. Number is the value to be standardized. 
Mean is the arithmetic mean of the distribution. STDEV is the standard deviation of the distribution. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `STANDARDIZE` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_STDEV.md b/docs/build/reference/transformer/Excel/Excel_STDEV.md index edc96e0fd..3863dacc5 100644 --- a/docs/build/reference/transformer/Excel/Excel_STDEV.md +++ b/docs/build/reference/transformer/Excel/Excel_STDEV.md @@ -8,8 +8,6 @@ tags: # Stdev - - Excel STDEV(number_1; number_2; ... number_30): Estimates the standard deviation based on a sample. Number_1; number_2; ... number_30 are numerical values or ranges representing a sample based on an entire population. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `STDEV` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_STDEVA.md b/docs/build/reference/transformer/Excel/Excel_STDEVA.md index 8eccefa8a..4503eb463 100644 --- a/docs/build/reference/transformer/Excel/Excel_STDEVA.md +++ b/docs/build/reference/transformer/Excel/Excel_STDEVA.md @@ -8,8 +8,6 @@ tags: # Stdeva - - Excel STDEVA(value_1; value_2; ... value_30): Calculates the standard deviation of an estimation based on a sample. Value_1; value_2; ... value_30 are values or ranges representing a sample derived from an entire population. Text has the value 0. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `STDEVA` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_STDEVP.md b/docs/build/reference/transformer/Excel/Excel_STDEVP.md index 48a854531..277a15b5b 100644 --- a/docs/build/reference/transformer/Excel/Excel_STDEVP.md +++ b/docs/build/reference/transformer/Excel/Excel_STDEVP.md @@ -8,8 +8,6 @@ tags: # Stdevp - - Excel STDEVP(number_1; number_2; ... number_30): Calculates the standard deviation based on the entire population. Number_1; number_2; ... number_30 are numerical values or ranges representing a sample based on an entire population. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `STDEVP` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_STDEVPA.md b/docs/build/reference/transformer/Excel/Excel_STDEVPA.md index e1077c2ca..ad4117c01 100644 --- a/docs/build/reference/transformer/Excel/Excel_STDEVPA.md +++ b/docs/build/reference/transformer/Excel/Excel_STDEVPA.md @@ -8,8 +8,6 @@ tags: # Stdevpa - - Excel STDEVPA(value_1; value_2; ... value_30): Calculates the standard deviation based on the entire population. Value_1; value_2; ... value_30 are values or ranges representing a sample derived from an entire population. Text has the value 0. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `STDEVPA` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_SUBSTITUTE.md b/docs/build/reference/transformer/Excel/Excel_SUBSTITUTE.md index 1bb656eab..dcea8157d 100644 --- a/docs/build/reference/transformer/Excel/Excel_SUBSTITUTE.md +++ b/docs/build/reference/transformer/Excel/Excel_SUBSTITUTE.md @@ -8,8 +8,6 @@ tags: # Substitute - - Excel SUBSTITUTE(text; search_text; new text; occurrence): Substitutes new text for old text in a string. Text is the text in which text segments are to be exchanged. Search_text is the text segment that is to be replaced (a number of times). New text is the text that is to replace the text segment. Occurrence (optional) indicates how many occurrences of the search text are to be replaced. If this parameter is missing, the search text is replaced throughout. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `SUBSTITUTE` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_SUM.md b/docs/build/reference/transformer/Excel/Excel_SUM.md index 0d93f3f12..70f590f38 100644 --- a/docs/build/reference/transformer/Excel/Excel_SUM.md +++ b/docs/build/reference/transformer/Excel/Excel_SUM.md @@ -8,8 +8,6 @@ tags: # Sum - - Excel SUM(number_1; number_2; ... number_30): Adds all the numbers in a range of cells. Number_1; number_2;... number_30 are up to 30 arguments whose sum is to be calculated. You can also enter a range using cell references. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `SUM` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_SUMPRODUCT.md b/docs/build/reference/transformer/Excel/Excel_SUMPRODUCT.md index 21a732634..9e5f86cf7 100644 --- a/docs/build/reference/transformer/Excel/Excel_SUMPRODUCT.md +++ b/docs/build/reference/transformer/Excel/Excel_SUMPRODUCT.md @@ -8,8 +8,6 @@ tags: # Sumproduct - - Excel SUMPRODUCT(array 1; array 2; ...array 30): Multiplies corresponding elements in the given arrays, and returns the sum of those products. Array 1; array 2;...array 30 are arrays whose corresponding elements are to be multiplied. At least one array must be part of the argument list. If only one array is given, all array elements are summed. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `SUMPRODUCT` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_SUMSQ.md b/docs/build/reference/transformer/Excel/Excel_SUMSQ.md index c56660301..33143af95 100644 --- a/docs/build/reference/transformer/Excel/Excel_SUMSQ.md +++ b/docs/build/reference/transformer/Excel/Excel_SUMSQ.md @@ -8,8 +8,6 @@ tags: # Sumsq - - Excel SUMSQ(number_1; number_2; ... number_30): Calculates the sum of the squares of numbers (totaling up of the squares of the arguments) Number_1; number_2;... number_30 are up to 30 arguments, the sum of whose squares is to be calculated. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `SUMSQ` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_SUMX2MY2.md b/docs/build/reference/transformer/Excel/Excel_SUMX2MY2.md index 67c0be731..9d43107e1 100644 --- a/docs/build/reference/transformer/Excel/Excel_SUMX2MY2.md +++ b/docs/build/reference/transformer/Excel/Excel_SUMX2MY2.md @@ -8,8 +8,6 @@ tags: # Sumx2my2 - - Excel SUMX2MY2(array_X; array_Y): Returns the sum of the difference of squares of corresponding values in two arrays. Array_X is the first array whose elements are to be squared and added. Array_Y is the second array whose elements are to be squared and subtracted. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `SUMX2MY2` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_SUMX2PY2.md b/docs/build/reference/transformer/Excel/Excel_SUMX2PY2.md index 0fa4baa27..e18b10845 100644 --- a/docs/build/reference/transformer/Excel/Excel_SUMX2PY2.md +++ b/docs/build/reference/transformer/Excel/Excel_SUMX2PY2.md @@ -8,8 +8,6 @@ tags: # Sumx2py2 - - Excel SUMX2PY2(array_X; array_Y): Returns the sum of the sum of squares of corresponding values in two arrays. Array_X is the first array whose arguments are to be squared and added. Array_Y is the second array, whose elements are to be added and squared. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `SUMX2PY2` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_SUMXMY2.md b/docs/build/reference/transformer/Excel/Excel_SUMXMY2.md index f20c6f013..ae9d8068b 100644 --- a/docs/build/reference/transformer/Excel/Excel_SUMXMY2.md +++ b/docs/build/reference/transformer/Excel/Excel_SUMXMY2.md @@ -8,8 +8,6 @@ tags: # Sumxmy2 - - Excel SUMXMY2(array_X; array_Y): Adds the squares of the variance between corresponding values in two arrays. Array_X is the first array whose elements are to be subtracted and squared. Array_Y is the second array, whose elements are to be subtracted and squared. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `SUMXMY2` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_TAN.md b/docs/build/reference/transformer/Excel/Excel_TAN.md index ba61e33c9..86877770a 100644 --- a/docs/build/reference/transformer/Excel/Excel_TAN.md +++ b/docs/build/reference/transformer/Excel/Excel_TAN.md @@ -8,8 +8,6 @@ tags: # Tan - - Excel TAN(number): Returns the tangent of the given number (angle in radians). ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `TAN` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_TANH.md b/docs/build/reference/transformer/Excel/Excel_TANH.md index 412f90e73..aa17b935d 100644 --- a/docs/build/reference/transformer/Excel/Excel_TANH.md +++ b/docs/build/reference/transformer/Excel/Excel_TANH.md @@ -8,8 +8,6 @@ tags: # Tanh - - Excel TANH(number): Returns the hyperbolic tangent of the given number (angle in radians). 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `TANH` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_TDIST.md b/docs/build/reference/transformer/Excel/Excel_TDIST.md index 215898235..597ac7bf3 100644 --- a/docs/build/reference/transformer/Excel/Excel_TDIST.md +++ b/docs/build/reference/transformer/Excel/Excel_TDIST.md @@ -8,8 +8,6 @@ tags: # Tdist - - Excel TDIST(number; degrees_freedom; mode): Returns the t-distribution for the given Number. Degrees_freedom is the number of degrees of freedom for the t-distribution. Mode = 1 returns the one-tailed test, Mode = 2 returns the two-tailed test. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `TDIST` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_TRUE.md b/docs/build/reference/transformer/Excel/Excel_TRUE.md index a2041362e..2e7d3f0ad 100644 --- a/docs/build/reference/transformer/Excel/Excel_TRUE.md +++ b/docs/build/reference/transformer/Excel/Excel_TRUE.md @@ -8,8 +8,6 @@ tags: # True - - Excel TRUE(): Sets the logical value to TRUE. The TRUE() function does not require any arguments. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `TRUE` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_TRUNC.md b/docs/build/reference/transformer/Excel/Excel_TRUNC.md index 95b341f4d..9720f9040 100644 --- a/docs/build/reference/transformer/Excel/Excel_TRUNC.md +++ b/docs/build/reference/transformer/Excel/Excel_TRUNC.md @@ -8,8 +8,6 @@ tags: # Trunc - - Excel TRUNC(number; count): Truncates a number to an integer by removing the fractional part of the number according to the precision specified in Tools > Options > OpenOffice.org Calc > Calculate. Number is the number whose decimal places are to be cut off. Count is the number of decimal places which are not cut off. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `TRUNC` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_VAR.md b/docs/build/reference/transformer/Excel/Excel_VAR.md index ebac4d6a7..044904b53 100644 --- a/docs/build/reference/transformer/Excel/Excel_VAR.md +++ b/docs/build/reference/transformer/Excel/Excel_VAR.md @@ -8,8 +8,6 @@ tags: # Var - - Excel VAR(number_1; number_2; ... number_30): Estimates the variance based on a sample. Number_1; number_2; ... number_30 are numerical values or ranges representing a sample based on an entire population. 
## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `VAR` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_VARA.md b/docs/build/reference/transformer/Excel/Excel_VARA.md index ebb5d43bc..2bc082cbb 100644 --- a/docs/build/reference/transformer/Excel/Excel_VARA.md +++ b/docs/build/reference/transformer/Excel/Excel_VARA.md @@ -8,8 +8,6 @@ tags: # Vara - - Excel VARA(value_1; value_2; ... value_30): Estimates a variance based on a sample. The value of text is 0. Value_1; value_2; ... value_30 are values or ranges representing a sample derived from an entire population. Text has the value 0. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `VARA` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_VARP.md b/docs/build/reference/transformer/Excel/Excel_VARP.md index fedbc893c..76d2e0987 100644 --- a/docs/build/reference/transformer/Excel/Excel_VARP.md +++ b/docs/build/reference/transformer/Excel/Excel_VARP.md @@ -8,8 +8,6 @@ tags: # Varp - - Excel VARP(Number_1; number_2; ... number_30): Calculates a variance based on the entire population. Number_1; number_2; ... number_30 are numerical values or ranges representing an entire population. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `VARP` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Excel/Excel_VARPA.md b/docs/build/reference/transformer/Excel/Excel_VARPA.md index d06ec5c77..45f7f9211 100644 --- a/docs/build/reference/transformer/Excel/Excel_VARPA.md +++ b/docs/build/reference/transformer/Excel/Excel_VARPA.md @@ -8,8 +8,6 @@ tags: # Varpa - - Excel VARPA(value_1; value_2; .. 
.value_30): Calculates the variance based on the entire population. The value of text is 0. Value_1; value_2; ... value_30 are values or ranges representing an entire population. ## Parameter @@ -22,10 +20,6 @@ The name of the Excel function - Datatype: `string` - Default Value: `VARPA` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Extract/regexExtract.md b/docs/build/reference/transformer/Extract/regexExtract.md index 0a419ed61..ea3c01c50 100644 --- a/docs/build/reference/transformer/Extract/regexExtract.md +++ b/docs/build/reference/transformer/Extract/regexExtract.md @@ -8,8 +8,6 @@ tags: # Regex extract - - ## Description The `regexExtract` plugin extracts one or all matches of a regular expression within the input. @@ -67,7 +65,6 @@ is useful for _validating_ the input, `ifMatchesRegex` _conditionally distinguis * Returns: `[afe123]` - --- **returns all matches, when extractAll = true:** @@ -80,7 +77,6 @@ is useful for _validating_ the input, `ifMatchesRegex` _conditionally distinguis * Returns: `[afe123, abcd123]` - --- **returns an empty list if nothing matches:** @@ -92,7 +88,6 @@ is useful for _validating_ the input, `ifMatchesRegex` _conditionally distinguis * Returns: `[]` - --- **returns the match of the first capturing group, which includes two to four letters:** @@ -104,7 +99,6 @@ is useful for _validating_ the input, `ifMatchesRegex` _conditionally distinguis * Returns: `[abcd]` - --- **returns the match of the first capturing group, which includes at least one letter:** @@ -116,7 +110,6 @@ is useful for _validating_ the input, `ifMatchesRegex` _conditionally distinguis * Returns: `[pqrstuvwxyz]` - --- **returns an empty string, because the first capturing group includes the possibility of no letters:** @@ -128,7 +121,6 @@ is useful for _validating_ the input, `ifMatchesRegex` _conditionally distinguis * Returns: `[]` - --- **returns an empty list, because the first capturing 
group excludes the possibility of no letters:** @@ -140,7 +132,6 @@ is useful for _validating_ the input, `ifMatchesRegex` _conditionally distinguis * Returns: `[]` - --- **Example 8:** @@ -152,33 +143,24 @@ is useful for _validating_ the input, `ifMatchesRegex` _conditionally distinguis * Returns: `[]` - - - ## Parameter ### Regex Regular expression -- ID: `regex` -- Datatype: `string` -- Default Value: `None` - - +* ID: `regex` +* Datatype: `string` +* Default Value: `None` ### Extract all If true, all matches are extracted. If false, only the first match is extracted (default). -- ID: `extractAll` -- Datatype: `boolean` -- Default Value: `false` - - - - +* ID: `extractAll` +* Datatype: `boolean` +* Default Value: `false` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Filter/filterByLength.md b/docs/build/reference/transformer/Filter/filterByLength.md index fe05f36bd..3de8bffb5 100644 --- a/docs/build/reference/transformer/Filter/filterByLength.md +++ b/docs/build/reference/transformer/Filter/filterByLength.md @@ -8,11 +8,8 @@ tags: # Filter by length - - Removes all strings that are shorter than 'min' characters and longer than 'max' characters. - ## Parameter ### Min @@ -23,8 +20,6 @@ No description - Datatype: `int` - Default Value: `0` - - ### Max No description @@ -33,10 +28,6 @@ No description - Datatype: `int` - Default Value: `2147483647` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Filter/filterByRegex.md b/docs/build/reference/transformer/Filter/filterByRegex.md index 16ed86643..d0809068c 100644 --- a/docs/build/reference/transformer/Filter/filterByRegex.md +++ b/docs/build/reference/transformer/Filter/filterByRegex.md @@ -8,11 +8,8 @@ tags: # Filter by regex - - Removes all strings that do NOT match a regex. If 'negate' is true, only strings will be removed that match the regex. 
- ## Parameter ### Regex @@ -23,8 +20,6 @@ No description - Datatype: `string` - Default Value: `None` - - ### Negate No description @@ -33,10 +28,6 @@ No description - Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Filter/removeDefaultStopWords.md b/docs/build/reference/transformer/Filter/removeDefaultStopWords.md index e6d6e08bf..6ce3c287b 100644 --- a/docs/build/reference/transformer/Filter/removeDefaultStopWords.md +++ b/docs/build/reference/transformer/Filter/removeDefaultStopWords.md @@ -8,8 +8,6 @@ tags: # Remove default stop words - - This stop word list filter uses the following [list of English stop words](https://gist.githubusercontent.com/rg089/35e00abf8941d72d419224cfd5b5925d/raw/12d899b70156fd0041fa9778d657330b024b959c/stopwords.txt) as a default. @@ -33,7 +31,6 @@ Should you want to provide your own stop word list, either as a resource (e.g. a * Returns: `[To, question]` - --- **Example 2:** @@ -42,13 +39,10 @@ Should you want to provide your own stop word list, either as a resource (e.g. a * Returns: `[It impossible, ]` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Filter/removeEmptyValues.md b/docs/build/reference/transformer/Filter/removeEmptyValues.md index 28cc7f9ad..bc3f84cc8 100644 --- a/docs/build/reference/transformer/Filter/removeEmptyValues.md +++ b/docs/build/reference/transformer/Filter/removeEmptyValues.md @@ -8,8 +8,6 @@ tags: # Remove empty values - - Removes empty values. ## Examples @@ -24,7 +22,6 @@ Removes empty values. * Returns: `[value1, value2]` - --- **Example 2:** @@ -33,13 +30,10 @@ Removes empty values. 
* Returns: `[]` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Filter/removeRemoteStopWords.md b/docs/build/reference/transformer/Filter/removeRemoteStopWords.md index 8ae7994d5..0cbd4217f 100644 --- a/docs/build/reference/transformer/Filter/removeRemoteStopWords.md +++ b/docs/build/reference/transformer/Filter/removeRemoteStopWords.md @@ -8,8 +8,6 @@ tags: # Remove remote stop words - - The stop word list is retrieved from a remote URL such as [this German stop word list](https://raw.githubusercontent.com/stopwords-iso/stopwords-de/refs/heads/master/stopwords-de.txt). @@ -40,7 +38,6 @@ Additionally, notice the simpler filter 'removeDefaultStopWords', which uses a d * Returns: `[To, question]` - --- **Example 2:** @@ -49,33 +46,24 @@ Additionally, notice the simpler filter 'removeDefaultStopWords', which uses a d * Returns: `[It impossible, ]` - - - ## Parameter ### Stop word list url URL of the stop word list -- ID: `stopWordListUrl` -- Datatype: `string` -- Default Value: `https://raw.githubusercontent.com/stopwords-iso/stopwords-en/refs/heads/master/stopwords-en.txt` - - +* ID: `stopWordListUrl` +* Datatype: `string` +* Default Value: `https://raw.githubusercontent.com/stopwords-iso/stopwords-en/refs/heads/master/stopwords-en.txt` ### Separator RegEx for detecting words -- ID: `separator` -- Datatype: `string` -- Default Value: `[\s-]+` - - - - +* ID: `separator` +* Datatype: `string` +* Default Value: `[\s-]+` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Filter/removeStopWords.md b/docs/build/reference/transformer/Filter/removeStopWords.md index 097d80427..488941dd0 100644 --- a/docs/build/reference/transformer/Filter/removeStopWords.md +++ b/docs/build/reference/transformer/Filter/removeStopWords.md @@ -8,8 +8,6 @@ tags: # Remove stop words - - The stop word list is specified as a resource, e.g. 
a file identical to [this German stop word list](https://raw.githubusercontent.com/stopwords-iso/stopwords-de/refs/heads/master/stopwords-de.txt). @@ -38,7 +36,6 @@ Additionally, notice the simpler filter 'removeDefaultStopWords', which uses a d * Returns: `[, question]` - --- **Example 2:** @@ -47,33 +44,24 @@ Additionally, notice the simpler filter 'removeDefaultStopWords', which uses a d * Returns: `[impossible, ]` - - - ## Parameter ### Stop word list Resource for the stop word list -- ID: `stopWordList` -- Datatype: `resource` -- Default Value: `None` - - +* ID: `stopWordList` +* Datatype: `resource` +* Default Value: `None` ### Separator RegEx for detecting words -- ID: `separator` -- Datatype: `string` -- Default Value: `[\s-]+` - - - - +* ID: `separator` +* Datatype: `string` +* Default Value: `[\s-]+` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Filter/removeValues.md b/docs/build/reference/transformer/Filter/removeValues.md index f81f8fd7a..2683f0760 100644 --- a/docs/build/reference/transformer/Filter/removeValues.md +++ b/docs/build/reference/transformer/Filter/removeValues.md @@ -8,11 +8,8 @@ tags: # Remove values - - Removes values that contain words from a blacklist. The blacklist values are separated with commas. - ## Parameter ### Blacklist @@ -23,10 +20,6 @@ No description - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Geo/RetrieveCoordinates.md b/docs/build/reference/transformer/Geo/RetrieveCoordinates.md index 7c294022c..e602faa05 100644 --- a/docs/build/reference/transformer/Geo/RetrieveCoordinates.md +++ b/docs/build/reference/transformer/Geo/RetrieveCoordinates.md @@ -8,9 +8,6 @@ tags: # Retrieve coordinates - - - **Configuration** The geocoding service to be queried for searches can be set up in the configuration. 
@@ -44,7 +41,6 @@ By default, individual requests to the geocoding service are not logged. To enab com.eccenca.di.geo=DEBUG } - ## Parameter `None` @@ -58,6 +54,3 @@ Additional URL parameters to be attached to each HTTP search request. Example: ' - ID: `additionalParameters` - Datatype: `string` - Default Value: `None` - - - diff --git a/docs/build/reference/transformer/Geo/RetrieveLatitude.md b/docs/build/reference/transformer/Geo/RetrieveLatitude.md index a8b4c89c1..ca80644da 100644 --- a/docs/build/reference/transformer/Geo/RetrieveLatitude.md +++ b/docs/build/reference/transformer/Geo/RetrieveLatitude.md @@ -8,9 +8,6 @@ tags: # Retrieve latitude - - - **Configuration** The geocoding service to be queried for searches can be set up in the configuration. @@ -44,7 +41,6 @@ By default, individual requests to the geocoding service are not logged. To enab com.eccenca.di.geo=DEBUG } - ## Parameter `None` @@ -58,6 +54,3 @@ Additional URL parameters to be attached to each HTTP search request. Example: ' - ID: `additionalParameters` - Datatype: `string` - Default Value: `None` - - - diff --git a/docs/build/reference/transformer/Geo/RetrieveLongitude.md b/docs/build/reference/transformer/Geo/RetrieveLongitude.md index 66d3d786f..8da180fbe 100644 --- a/docs/build/reference/transformer/Geo/RetrieveLongitude.md +++ b/docs/build/reference/transformer/Geo/RetrieveLongitude.md @@ -8,9 +8,6 @@ tags: # Retrieve longitude - - - **Configuration** The geocoding service to be queried for searches can be set up in the configuration. @@ -44,7 +41,6 @@ By default, individual requests to the geocoding service are not logged. To enab com.eccenca.di.geo=DEBUG } - ## Parameter `None` @@ -58,6 +54,3 @@ Additional URL parameters to be attached to each HTTP search request. 
Example: ' - ID: `additionalParameters` - Datatype: `string` - Default Value: `None` - - - diff --git a/docs/build/reference/transformer/Linguistic/NYSIIS.md b/docs/build/reference/transformer/Linguistic/NYSIIS.md index 2fba1d320..9c0a78112 100644 --- a/docs/build/reference/transformer/Linguistic/NYSIIS.md +++ b/docs/build/reference/transformer/Linguistic/NYSIIS.md @@ -8,8 +8,6 @@ tags: # NYSIIS - - This transformer plugin implements the **NYSIIS** phonetic algorithm for encoding names. The acronym NYSIIS stands for _New York State Identification and Intelligence System_. This so-called _phonetic code_ @@ -30,12 +28,12 @@ necessarily contain this limitation. This plugin doesn't. ### Modified NYSIIS The **Modified NYSIIS** is an improvement of the NYSIIS algorithm. Its working is illustrated, step by step, in -http://www.dropby.com/NYSIIS.html. +. ## Examples We can get an idea of the output of the NYSIIS algorithm using an online version of it such as the already mentioned -http://www.dropby.com/NYSIIS.html. It contains both the (plain) NYSIIS and the _modified_ NYSIIS algorithms. +. It contains both the (plain) NYSIIS and the _modified_ NYSIIS algorithms. As a comparison of the two versions of NYSIIS, we give a few examples: @@ -51,21 +49,16 @@ implemented by this (Metaphone) and the [NYSIIS](https://en.wikipedia.org/wiki/New_York_State_Identification_and_Intelligence_System) algorithms. The corresponding linguistic transformer plugins are named accordingly. 
- ## Parameter ### Refined No description -- ID: `refined` -- Datatype: `boolean` -- Default Value: `true` - - - - +* ID: `refined` +* Datatype: `boolean` +* Default Value: `true` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Linguistic/metaphone.md b/docs/build/reference/transformer/Linguistic/metaphone.md index 8c153415a..969a179db 100644 --- a/docs/build/reference/transformer/Linguistic/metaphone.md +++ b/docs/build/reference/transformer/Linguistic/metaphone.md @@ -8,8 +8,6 @@ tags: # Metaphone - - This transformer plugin implements the **Metaphone** phonetic algorithm for indexing words according to English. ## Description @@ -40,11 +38,10 @@ Related phonetic algorithms are the different variations or improvements of the (Metaphone) and the [`Metaphone`](https://en.wikipedia.org/wiki/Metaphone) algorithms. The corresponding linguistic transformer plugins are named accordingly. - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Linguistic/normalizeChars.md b/docs/build/reference/transformer/Linguistic/normalizeChars.md index 4effbc86c..af22a0c11 100644 --- a/docs/build/reference/transformer/Linguistic/normalizeChars.md +++ b/docs/build/reference/transformer/Linguistic/normalizeChars.md @@ -8,15 +8,12 @@ tags: # Normalize chars - - Replaces diacritical characters with non-diacritical ones (eg, ö -> o), plus some specialities like transforming æ -> ae, ß -> ss. 
- ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Linguistic/soundex.md b/docs/build/reference/transformer/Linguistic/soundex.md index c97783905..7098c3a83 100644 --- a/docs/build/reference/transformer/Linguistic/soundex.md +++ b/docs/build/reference/transformer/Linguistic/soundex.md @@ -8,8 +8,6 @@ tags: # Soundex - - This transformer plugin implements the **Soundex** phonetic algorithm for indexing names by their English sounds. ## Description @@ -58,7 +56,7 @@ used by setting the plugin parameter `refined` to `true` (default). Its mapping ### Soundex We can get an idea of the output of the Soundex algorithm using an online Soundex Converter such as -https://www.mainegenealogy.net/soundex_converter.asp. +. * `robert` and `rupert` lead to the same Soundex index: `r163`. * `euler` leads to `e460`, `gauss` is `g200` and `hilbert` corresponds to `h416`. @@ -68,7 +66,7 @@ https://www.mainegenealogy.net/soundex_converter.asp. * `braz` and `broz` lead to the same Refined Soundex index: `b1905`. * `caren`, `carren`, `coram`, `corran`, `curreen` and `curwen` are all encoded with `c30908`. * `hairs`, `hark`, `hars`, `hayers`, `heers` and `hiers` are all mapped to `h093`. -* All sorts of variations of `lambard`, such as `lambart`, `lambert`, `lambird` or `lampaert`, lead to `l7081096`. +* All sorts of variations of `lambard`, such as `lambart`, `lambert`, `lambird` or `lampaert`, lead to `l7081096`. ## Related plugins @@ -77,21 +75,16 @@ Other phonetic algorithms usually associated with Soundex are the variations or and [`Metaphone`](https://en.wikipedia.org/wiki/Metaphone) algorithms. The corresponding linguistic transformer plugins are named accordingly. 
- ## Parameter ### Refined No description -- ID: `refined` -- Datatype: `boolean` -- Default Value: `true` - - - - +* ID: `refined` +* Datatype: `boolean` +* Default Value: `true` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Linguistic/stem.md b/docs/build/reference/transformer/Linguistic/stem.md index 75940976c..d6b1f804e 100644 --- a/docs/build/reference/transformer/Linguistic/stem.md +++ b/docs/build/reference/transformer/Linguistic/stem.md @@ -8,15 +8,12 @@ tags: # Stem - - Stems a string using the Porter Stemmer. - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Metadata/fileHash.md b/docs/build/reference/transformer/Metadata/fileHash.md index e643f8668..e06cdbb8f 100644 --- a/docs/build/reference/transformer/Metadata/fileHash.md +++ b/docs/build/reference/transformer/Metadata/fileHash.md @@ -8,14 +8,11 @@ tags: # File hash - - Calculates the hash sum of a file. The hash sum is cached so that subsequent calls to this operator are fast. Note that initially and every time the specified resource has been updated, this operator might take a long time (depending on the file size). This operator supports using different hash algorithms from the [Secure Hash Algorithms family](https://en.wikipedia.org/wiki/Secure_Hash_Algorithms) (SHA, e.g. SHA256) and two algorithms from the [Message-Digest Algorithm family](https://en.wikipedia.org/wiki/MD5) (MD2 / MD5). Please be aware that some of these algorithms are not secure regarding collision- and other attacks. Note: This transform operator ignores any inputs. - ## Parameter ### File @@ -26,8 +23,6 @@ File for which the hash sum will be calculated. If left empty, the file of the i - Datatype: `resource` - Default Value: `None` - - ### Algorithm The hash algorithm to be used. @@ -36,10 +31,6 @@ The hash algorithm to be used. 
- Datatype: `string` - Default Value: `SHA256` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Metadata/inputFileAttributes.md b/docs/build/reference/transformer/Metadata/inputFileAttributes.md index 4e1415817..43f8ae92b 100644 --- a/docs/build/reference/transformer/Metadata/inputFileAttributes.md +++ b/docs/build/reference/transformer/Metadata/inputFileAttributes.md @@ -8,11 +8,8 @@ tags: # Input file attributes - - Retrieves a metadata attribute from the input file (such as the file name). - ## Parameter ### Attribute @@ -23,10 +20,6 @@ File attribute to be retrieved from the input dataset. - Datatype: `enumeration` - Default Value: `name` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Metadata/inputTaskAttributes.md b/docs/build/reference/transformer/Metadata/inputTaskAttributes.md index 09faa10d4..0e89098f4 100644 --- a/docs/build/reference/transformer/Metadata/inputTaskAttributes.md +++ b/docs/build/reference/transformer/Metadata/inputTaskAttributes.md @@ -8,11 +8,8 @@ tags: # Input task attributes - - Retrieves individual attributes from the input task (such as the modified date) or the entire task as JSON. - ## Parameter ### Path @@ -23,10 +20,6 @@ Path to retrieve from the JSON, such as 'metadata/modified'. If left empty, the - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Normalize/alphaReduce.md b/docs/build/reference/transformer/Normalize/alphaReduce.md index fd4fa278f..d6d2db8f0 100644 --- a/docs/build/reference/transformer/Normalize/alphaReduce.md +++ b/docs/build/reference/transformer/Normalize/alphaReduce.md @@ -8,15 +8,12 @@ tags: # Strip non-alphabetic characters - - Strips all non-alphabetic characters from a string. Spaces are retained. 
- ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Normalize/camelCase.md b/docs/build/reference/transformer/Normalize/camelCase.md index d6c5ce42c..c6fda5b29 100644 --- a/docs/build/reference/transformer/Normalize/camelCase.md +++ b/docs/build/reference/transformer/Normalize/camelCase.md @@ -8,8 +8,6 @@ tags: # Camel case - - Converts a string to camel case. Upper camel case is the default, lower camel case can be chosen. ## Examples @@ -27,7 +25,6 @@ Converts a string to camel case. Upper camel case is the default, lower camel ca * Returns: `[HelloWorld]` - --- **A sentence with several words is converted to a single word written in lowerCamelCase:** @@ -39,7 +36,6 @@ Converts a string to camel case. Upper camel case is the default, lower camel ca * Returns: `[helloWorld]` - --- **A single lowercase letter is converted to UpperCamelCase, i.e. capitalized:** @@ -51,7 +47,6 @@ Converts a string to camel case. Upper camel case is the default, lower camel ca * Returns: `[H]` - --- **A single lowercase letter is converted to lowerCamelCase (aka. dromedary case), i.e. uncapitalized:** @@ -63,7 +58,6 @@ Converts a string to camel case. Upper camel case is the default, lower camel ca * Returns: `[h]` - --- **An empty space is removed. The dromedary/lower case is irrelevant here:** @@ -75,7 +69,6 @@ Converts a string to camel case. Upper camel case is the default, lower camel ca * Returns: `[]` - --- **An empty space is removed. The upper case is irrelevant here:** @@ -87,23 +80,16 @@ Converts a string to camel case. Upper camel case is the default, lower camel ca * Returns: `[]` - - - ## Parameter ### Dromedary case If true, lower camel case (aka. dromedary case) is used, otherwise upper camel case is used. 
-- ID: `isDromedary` -- Datatype: `boolean` -- Default Value: `false` - - - - +* ID: `isDromedary` +* Datatype: `boolean` +* Default Value: `false` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Normalize/capitalize.md b/docs/build/reference/transformer/Normalize/capitalize.md index 91e51282a..7e0375542 100644 --- a/docs/build/reference/transformer/Normalize/capitalize.md +++ b/docs/build/reference/transformer/Normalize/capitalize.md @@ -8,8 +8,6 @@ tags: # Capitalize - - Capitalizes the string i.e. converts the first character to upper case. If 'allWords' is set to true, all words are capitalized and not only the first character. ## Examples @@ -27,7 +25,6 @@ Capitalizes the string i.e. converts the first character to upper case. If 'allW * Returns: `[Capitalize me]` - --- **Example 2:** @@ -39,23 +36,16 @@ Capitalizes the string i.e. converts the first character to upper case. If 'allW * Returns: `[Capitalize Me]` - - - ## Parameter ### All words No description -- ID: `allWords` -- Datatype: `boolean` -- Default Value: `false` - - - - +* ID: `allWords` +* Datatype: `boolean` +* Default Value: `false` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Normalize/htmlCleaner.md b/docs/build/reference/transformer/Normalize/htmlCleaner.md index be1dda482..be7ebfdc2 100644 --- a/docs/build/reference/transformer/Normalize/htmlCleaner.md +++ b/docs/build/reference/transformer/Normalize/htmlCleaner.md @@ -8,15 +8,12 @@ tags: # Clean HTML - - This transformer cleans HTML markup using a whitelist of HTML tags. It allows the selection of HTML sections with XPath or CSS selector expressions. If the tag or attribute whitelists are left empty, default whitelists will be used. This behaviour can be changed. To remove all the HTML tags and retain plain text, keep the defaults and turn off the "Default tags and attributes" toggle. 
The operator takes two inputs: the page HTML and, optionally, the page URL which may be needed to resolve relative links in the HTML page. - ## Parameter ### Tag white list @@ -27,8 +24,6 @@ Tags to keep in the cleaned output. - Datatype: `traversable[string]` - Default Value: `None` - - ### Attribute white list Attributes to keep in the cleaned output. @@ -37,8 +32,6 @@ Attributes to keep in the cleaned output. - Datatype: `traversable[string]` - Default Value: `None` - - ### Selectors CSS or XPath queries for selection of content. CSS selectors can be pipe separated for non-sequential execution. @@ -47,8 +40,6 @@ CSS or XPath queries for selection of content. CSS selectors can be pipe separat - Datatype: `traversable[string]` - Default Value: `None` - - ### Method Selects use of XPath or CSS selectors. @@ -57,8 +48,6 @@ Selects use of XPath or CSS selectors. - Datatype: `enumeration` - Default Value: `xPath` - - ### Default tags and attributes Use defaults for empty tag and attribute whitelists. If the attribute while list is empty, it will default to: "class", "id", "href", "src" If the tag while list is empty, it will default to: "a", "b", "blockquote", "br", "caption", "cite", "code", "col", "colgroup", "dd", "div", "dl", "dt", "em", "h1", "h2", "h3", "h4", "h5", "h6","i", "img", "li", "ol", "p", "pre", "q", "small", "span", "strike", "strong","sub", "sup", "table", "tbody", "td", "tfoot", "th", "thead", "tr", "u", "ul". @@ -67,10 +56,6 @@ Use defaults for empty tag and attribute whitelists. 
If the attribute while list - Datatype: `boolean` - Default Value: `true` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Normalize/lowerCase.md b/docs/build/reference/transformer/Normalize/lowerCase.md index 84a919884..4ea9075dc 100644 --- a/docs/build/reference/transformer/Normalize/lowerCase.md +++ b/docs/build/reference/transformer/Normalize/lowerCase.md @@ -8,8 +8,6 @@ tags: # Lower case - - Converts a string to lower case. ## Examples @@ -24,13 +22,10 @@ Converts a string to lower case. * Returns: `[john, lena]` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Normalize/removeBlanks.md b/docs/build/reference/transformer/Normalize/removeBlanks.md index 1d364e1e1..02c40bf25 100644 --- a/docs/build/reference/transformer/Normalize/removeBlanks.md +++ b/docs/build/reference/transformer/Normalize/removeBlanks.md @@ -8,15 +8,12 @@ tags: # Remove blanks - - Remove whitespace from a string. - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Normalize/removeDuplicates.md b/docs/build/reference/transformer/Normalize/removeDuplicates.md index f858e9da7..e21b68c46 100644 --- a/docs/build/reference/transformer/Normalize/removeDuplicates.md +++ b/docs/build/reference/transformer/Normalize/removeDuplicates.md @@ -8,15 +8,12 @@ tags: # Remove duplicates - - Removes duplicated values, making a value sequence distinct. 
- ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Normalize/removeParentheses.md b/docs/build/reference/transformer/Normalize/removeParentheses.md index f730586bb..3899f13b3 100644 --- a/docs/build/reference/transformer/Normalize/removeParentheses.md +++ b/docs/build/reference/transformer/Normalize/removeParentheses.md @@ -8,15 +8,12 @@ tags: # Remove parentheses - - Remove all parentheses including their content, e.g., transforms 'Berlin (City)' -> 'Berlin'. - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Normalize/removeSpecialChars.md b/docs/build/reference/transformer/Normalize/removeSpecialChars.md index 18c59c6d9..167519ec6 100644 --- a/docs/build/reference/transformer/Normalize/removeSpecialChars.md +++ b/docs/build/reference/transformer/Normalize/removeSpecialChars.md @@ -8,15 +8,12 @@ tags: # Remove special chars - - Remove special characters (including punctuation) from a string. - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Normalize/sortWords.md b/docs/build/reference/transformer/Normalize/sortWords.md index 1efac0d53..64f602876 100644 --- a/docs/build/reference/transformer/Normalize/sortWords.md +++ b/docs/build/reference/transformer/Normalize/sortWords.md @@ -8,8 +8,6 @@ tags: # Sort words - - Sorts all words in each value lexicographically. ## Examples @@ -24,7 +22,6 @@ Sorts all words in each value lexicographically. * Returns: `[]` - --- **Example 2:** @@ -33,7 +30,6 @@ Sorts all words in each value lexicographically. * Returns: `[a b c]` - --- **Example 3:** @@ -42,33 +38,24 @@ Sorts all words in each value lexicographically. * Returns: `[Hamburg Hans Hansa, Marburg München]` - - - ## Parameter ### Split regex The regular expression used to split values into words. 
-- ID: `splitRegex` -- Datatype: `string` -- Default Value: `\s+` - - +* ID: `splitRegex` +* Datatype: `string` +* Default Value: `\s+` ### Glue Separator to be inserted between sorted words. -- ID: `glue` -- Datatype: `string` -- Default Value: ` ` - - - - +* ID: `glue` +* Datatype: `string` +* Default Value: ` ` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Normalize/trim.md b/docs/build/reference/transformer/Normalize/trim.md index b28653ea7..4720e74c2 100644 --- a/docs/build/reference/transformer/Normalize/trim.md +++ b/docs/build/reference/transformer/Normalize/trim.md @@ -8,15 +8,12 @@ tags: # Trim - - Remove leading and trailing whitespaces. - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Normalize/upperCase.md b/docs/build/reference/transformer/Normalize/upperCase.md index c81ff4658..3f30b7aa5 100644 --- a/docs/build/reference/transformer/Normalize/upperCase.md +++ b/docs/build/reference/transformer/Normalize/upperCase.md @@ -8,15 +8,12 @@ tags: # Upper case - - Converts a string to upper case. - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Normalize/uriFix.md b/docs/build/reference/transformer/Normalize/uriFix.md index aa731ccc6..b5cc4c46a 100644 --- a/docs/build/reference/transformer/Normalize/uriFix.md +++ b/docs/build/reference/transformer/Normalize/uriFix.md @@ -8,8 +8,6 @@ tags: # Fix URI - - Generates valid absolute URIs from the given values. Already valid absolute URIs are left untouched. ## Examples @@ -24,7 +22,6 @@ Generates valid absolute URIs from the given values. Already valid absolute URIs * Returns: `[urn:url-encoded-value:ab]` - --- **URI reserved characters are encoded:** @@ -33,7 +30,6 @@ Generates valid absolute URIs from the given values. 
Already valid absolute URIs * Returns: `[urn:url-encoded-value:a%26b]` - --- **Valid absolute URIs are forwarded unchanged:** @@ -42,7 +38,6 @@ Generates valid absolute URIs from the given values. Already valid absolute URIs * Returns: `[http://example.org/some/path]` - --- **Query parameters and fragments are left unchanged:** @@ -51,7 +46,6 @@ Generates valid absolute URIs from the given values. Already valid absolute URIs * Returns: `[http://example.org/path?query=some+stuff#hashtag]` - --- **Valid URNs are forwarded unchanged:** @@ -60,7 +54,6 @@ Generates valid absolute URIs from the given values. Already valid absolute URIs * Returns: `[urn:valid:uri]` - --- **Special characters are encoded:** @@ -69,7 +62,6 @@ Generates valid absolute URIs from the given values. Already valid absolute URIs * Returns: `[http://www.broken%20domain.com/broken%20weird%20path%20%C3%A4%C3%B6%C3%BC/nice/path/andNowSomeFragment#fragment%C3%A4%C3%B6%C3%BC]` - --- **Hash signs are only encoded if they don't denote a fragment:** @@ -78,7 +70,6 @@ Generates valid absolute URIs from the given values. Already valid absolute URIs * Returns: `[http://domain/#%23path%23]` - --- **Invalid URIs are fully encoded:** @@ -87,7 +78,6 @@ Generates valid absolute URIs from the given values. Already valid absolute URIs * Returns: `[urn:url-encoded-value:http+%3A+invalid+URI]` - --- **Leading and trailing spaces are removed:** @@ -96,7 +86,6 @@ Generates valid absolute URIs from the given values. Already valid absolute URIs * Returns: `[http://domain.com/%5BsquareBrackets%5D]` - --- **Example 10:** @@ -105,23 +94,16 @@ Generates valid absolute URIs from the given values. 
Already valid absolute URIs * Returns: `[urn:url-encoded-value:100%25]` - - - ## Parameter ### Uri prefix No description -- ID: `uriPrefix` -- Datatype: `string` -- Default Value: `urn:url-encoded-value:` - - - - +* ID: `uriPrefix` +* Datatype: `string` +* Default Value: `urn:url-encoded-value:` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Normalize/urlEncode.md b/docs/build/reference/transformer/Normalize/urlEncode.md index cc84618ac..798116325 100644 --- a/docs/build/reference/transformer/Normalize/urlEncode.md +++ b/docs/build/reference/transformer/Normalize/urlEncode.md @@ -8,8 +8,6 @@ tags: # Encode URL - - URL encodes the string. ## Examples @@ -24,7 +22,6 @@ URL encodes the string. * Returns: `[ab]` - --- **Example 2:** @@ -33,7 +30,6 @@ URL encodes the string. * Returns: `[a%26b]` - --- **Example 3:** @@ -42,9 +38,6 @@ URL encodes the string. * Returns: `[http%3A%2F%2Fexample.org%2Fsome%2Fpath]` - - - ## Parameter `None` @@ -55,9 +48,6 @@ URL encodes the string. The character encoding. -- ID: `encoding` -- Datatype: `string` -- Default Value: `UTF-8` - - - +* ID: `encoding` +* Datatype: `string` +* Default Value: `UTF-8` diff --git a/docs/build/reference/transformer/Numeric/PhysicalQuantitiesNormalizer.md b/docs/build/reference/transformer/Numeric/PhysicalQuantitiesNormalizer.md index 930e1b1df..ac3a7e90b 100644 --- a/docs/build/reference/transformer/Numeric/PhysicalQuantitiesNormalizer.md +++ b/docs/build/reference/transformer/Numeric/PhysicalQuantitiesNormalizer.md @@ -8,8 +8,6 @@ tags: # Normalize physical quantity - - This transformer normalizes physical quantities. Upon its creation, the Physical Quantities Normalizer can be configured by specifying a _target unit_ and a _number format_. Both parameters are optional. 
@@ -25,6 +23,7 @@ The physical magnitudes are expected to be expressed in the form `{Number}{UnitP Spaces between the number and the physical unit, consisting of unit prefix and symbol, are optional. Additionally: + * If one input is provided, the physical quantities are parsed from the provided strings of the form `"1 km"`. * If two inputs are provided, the numeric values are parsed from the first input and the units from the second input. @@ -36,6 +35,7 @@ Additionally: Time is expressed in seconds (symbol: `s`). The following alternative symbols are supported: + * `mo_s`: day*29.53059 * `mo_g`: year/12.0 * `a`: day*365.25 @@ -48,11 +48,11 @@ The following alternative symbols are supported: * `a_t`: day*365.24219 * `d`: day - ### Length Length is expressed in metres (symbol: `m`). The following alternative symbols are supported: + * `in`: c(cm*254.0) * `nmi`: m*1852.0 * `Ao`: dnm @@ -65,11 +65,11 @@ The following alternative symbols are supported: * `mi`: ((c(cm*254.0))*12.0)*5280.0 * `hd`: (c(cm*254.0))*4.0 - ### Mass Mass is expressed in kilograms (symbol: `kg`). The following alternative symbols are supported: + * `lb`: lb * `ston`: hlb*20.0 * `t`: Mg @@ -83,21 +83,20 @@ The following alternative symbols are supported: * `dr`: oz/16.0 * `lton`: (lb*112.0)*20.0 - ### Electric current Electric current is expressed in amperes (symbol: `A`). The following alternative symbols are supported: + * `Bi`: daA * `Gb`: cm·(A/m)*250.0/[one?] - ### Temperature Temperature is expressed in kelvins (symbol: `K`). The following alternative symbols are supported: -* `Cel`: ℃ +* `Cel`: ℃ ### Amount of substance @@ -111,6 +110,7 @@ Luminous intensity is expressed in candelas (symbol: `cd`). Area is expressed in square metres (symbol: `m²`). 
The following alternative symbols are supported: + * `m2`: m² * `ar`: hm² * `syd`: ((c(cm*254.0))*12.0)*3.0² @@ -119,11 +119,11 @@ The following alternative symbols are supported: * `sft`: (c(cm*254.0))*12.0² * `sin`: c(cm*254.0)² - ### Volume Volume is expressed in cubic metres (symbol: `㎥`). The following alternative symbols are supported: + * `st`: [㎥?] * `bf`: (c(cm*254.0)³)*144.0 * `cyd`: ((c(cm*254.0))*12.0)*3.0³ @@ -134,66 +134,64 @@ The following alternative symbols are supported: * `cft`: (c(cm*254.0))*12.0³ * `m3`: ㎥ - ### Energy Energy is expressed in joules (symbol: `J`). The following alternative symbols are supported: + * `cal_IT`: (J*41868.0)/10000.0 * `eV`: J*1.602176487E-19 * `cal_m`: (J*419002.0)/100000.0 * `cal`: m(J*4184.0) * `cal_th`: m(J*4184.0) - ### Angle Angle is expressed in radians (symbol: `rad`). The following alternative symbols are supported: + * `circ`: [one?]·rad*2.0 * `gon`: ([one?]·rad/180.0)*0.9 * `deg`: [one?]·rad/180.0 * `'`: ([one?]·rad/180.0)/60.0 * `''`: (([one?]·rad/180.0)/60.0)/60.0 - ### Others -- `1/m`, derived units: `Ky`: c(1/m) -- `kg/(m·s)`, derived units: `P`: g/(s·cm) -- `bit/s`, derived units: `Bd`: bit/s -- `bit`, derived units: `By`: bit*8.0 -- `Sv` -- `N` -- `Ω`, derived units: `Ohm`: Ω -- `T`, derived units: `G`: T/10000.0 -- `sr`, derived units: `sph`: [one?]·sr*4.0 -- `F` -- `C/kg`, derived units: `R`: (C/kg)*2.58E-4 -- `cd/m²`, derived units: `sb`: cd/cm², `Lmb`: cd/([one?]·cm²) -- `Pa`, derived units: `bar`: Pa*100000.0, `atm`: Pa*101325.0 -- `kg/(m·s²)`, derived units: `att`: k(g·(m/s²)*9.80665)/cm² -- `m²/s`, derived units: `St`: cm²/s -- `A/m`, derived units: `Oe`: (A/m)*250.0/[one?] 
-- `kg·m²/s²`, derived units: `erg`: cm²·g/s² -- `kg/m³`, derived units: `g%`: g/dl -- `mho` -- `V` -- `lx`, derived units: `ph`: lx/10000.0 -- `m/s²`, derived units: `Gal`: cm/s², `m/s2`: m/s² -- `m/s`, derived units: `kn`: m*1852.0/h -- `m·kg/s²`, derived units: `gf`: g·(m/s²)*9.80665, `lbf`: lb·(m/s²)*9.80665, `dyn`: cm·g/s² -- `m²/s²`, derived units: `RAD`: cm²·g/(s²·hg), `REM`: cm²·g/(s²·hg) -- `C` -- `Gy` -- `Hz` -- `H` -- `lm` -- `W` -- `Wb`, derived units: `Mx`: Wb/1.0E8 -- `Bq`, derived units: `Ci`: Bq*3.7E10 -- `S` - +* `1/m`, derived units: `Ky`: c(1/m) +* `kg/(m·s)`, derived units: `P`: g/(s·cm) +* `bit/s`, derived units: `Bd`: bit/s +* `bit`, derived units: `By`: bit*8.0 +* `Sv` +* `N` +* `Ω`, derived units: `Ohm`: Ω +* `T`, derived units: `G`: T/10000.0 +* `sr`, derived units: `sph`: [one?]·sr*4.0 +* `F` +* `C/kg`, derived units: `R`: (C/kg)*2.58E-4 +* `cd/m²`, derived units: `sb`: cd/cm², `Lmb`: cd/([one?]·cm²) +* `Pa`, derived units: `bar`: Pa*100000.0, `atm`: Pa*101325.0 +* `kg/(m·s²)`, derived units: `att`: k(g·(m/s²)*9.80665)/cm² +* `m²/s`, derived units: `St`: cm²/s +* `A/m`, derived units: `Oe`: (A/m)*250.0/[one?] 
+* `kg·m²/s²`, derived units: `erg`: cm²·g/s² +* `kg/m³`, derived units: `g%`: g/dl +* `mho` +* `V` +* `lx`, derived units: `ph`: lx/10000.0 +* `m/s²`, derived units: `Gal`: cm/s², `m/s2`: m/s² +* `m/s`, derived units: `kn`: m*1852.0/h +* `m·kg/s²`, derived units: `gf`: g·(m/s²)*9.80665, `lbf`: lb·(m/s²)*9.80665, `dyn`: cm·g/s² +* `m²/s²`, derived units: `RAD`: cm²·g/(s²·hg), `REM`: cm²·g/(s²·hg) +* `C` +* `Gy` +* `Hz` +* `H` +* `lm` +* `W` +* `Wb`, derived units: `Mx`: Wb/1.0E8 +* `Bq`, derived units: `Ci`: Bq*3.7E10 +* `S` ## Examples @@ -207,7 +205,6 @@ The following alternative symbols are supported: * Returns: `[1000.0]` - --- **Example 2:** @@ -216,7 +213,6 @@ The following alternative symbols are supported: * Returns: `[0.3048]` - --- **Example 3:** @@ -225,7 +221,6 @@ The following alternative symbols are supported: * Returns: `[0.45359237]` - --- **Example 4:** @@ -234,7 +229,6 @@ The following alternative symbols are supported: * Returns: `[1.0]` - --- **Example 5:** @@ -243,7 +237,6 @@ The following alternative symbols are supported: * Returns: `[-1000000.0]` - --- **Example 6:** @@ -255,7 +248,6 @@ The following alternative symbols are supported: * Returns: `[1000.5]` - --- **Example 7:** @@ -264,7 +256,6 @@ The following alternative symbols are supported: * Returns: `[1000.5]` - --- **Example 8:** @@ -276,7 +267,6 @@ The following alternative symbols are supported: * Returns: `[0.621371192237334]` - --- **Example 9:** @@ -289,7 +279,6 @@ The following alternative symbols are supported: * Returns: `[]` * **Throws error:** `ValidationException` - --- **Example 10:** @@ -299,7 +288,6 @@ The following alternative symbols are supported: * Returns: `[]` * **Throws error:** `ValidationException` - --- **Example 11:** @@ -309,7 +297,6 @@ The following alternative symbols are supported: * Returns: `[1000.0]` - --- **Example 12:** @@ -319,7 +306,6 @@ The following alternative symbols are supported: * Returns: `[1000.0, 10.0]` - --- **Example 13:** @@ -330,33 
+316,24 @@ The following alternative symbols are supported: * Returns: `[]` * **Throws error:** `ValidationException` - - - ## Parameter ### Target unit Target unit. Can be left empty to convert to the respective SI base units. -- ID: `targetUnit` -- Datatype: `string` -- Default Value: `None` - - +* ID: `targetUnit` +* Datatype: `string` +* Default Value: `None` ### Number format The IETF BCP 47 language tag, e.g., 'en'. -- ID: `numberFormat` -- Datatype: `string` -- Default Value: `en` - - - - +* ID: `numberFormat` +* Datatype: `string` +* Default Value: `en` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Numeric/aggregateNumbers.md b/docs/build/reference/transformer/Numeric/aggregateNumbers.md index 68ee3364f..3aa45d389 100644 --- a/docs/build/reference/transformer/Numeric/aggregateNumbers.md +++ b/docs/build/reference/transformer/Numeric/aggregateNumbers.md @@ -8,8 +8,6 @@ tags: # Aggregate numbers - - The `aggregateNumbers` plugin applies an aggregation operator to the sequence of input values. The allowed aggregation operators are **sum** (`+`), **product** (`*`), **minimum** (`min`), **maximum** (`max`) and **average** (`average`). @@ -45,7 +43,6 @@ numbers, they will be ignored. * Returns: `[3.0]` - --- **Example 2:** @@ -57,7 +54,6 @@ numbers, they will be ignored. * Returns: `[8.0]` - --- **Example 3:** @@ -69,7 +65,6 @@ numbers, they will be ignored. * Returns: `[1.0]` - --- **Example 4:** @@ -81,7 +76,6 @@ numbers, they will be ignored. * Returns: `[3.0]` - --- **Example 5:** @@ -93,7 +87,6 @@ numbers, they will be ignored. * Returns: `[2.0]` - --- **Example 6:** @@ -105,7 +98,6 @@ numbers, they will be ignored. * Returns: `[Infinity]` - --- **Example 7:** @@ -117,7 +109,6 @@ numbers, they will be ignored. * Returns: `[Infinity]` - --- **Example 8:** @@ -129,7 +120,6 @@ numbers, they will be ignored. * Returns: `[1.0]` - --- **Example 9:** @@ -141,7 +131,6 @@ numbers, they will be ignored. 
* Returns: `[Infinity]` - --- **Example 10:** @@ -153,7 +142,6 @@ numbers, they will be ignored. * Returns: `[Infinity]` - --- **Example 11:** @@ -165,7 +153,6 @@ numbers, they will be ignored. * Returns: `[1.0]` - --- **Example 12:** @@ -177,7 +164,6 @@ numbers, they will be ignored. * Returns: `[1.0]` - --- **Example 13:** @@ -189,7 +175,6 @@ numbers, they will be ignored. * Returns: `[1.0]` - --- **Example 14:** @@ -201,7 +186,6 @@ numbers, they will be ignored. * Returns: `[1.0]` - --- **Example 15:** @@ -213,7 +197,6 @@ numbers, they will be ignored. * Returns: `[1.0]` - --- **Example 16:** @@ -225,23 +208,16 @@ numbers, they will be ignored. * Returns: `[]` - - - ## Parameter ### Operator The aggregation operation to be applied to all values. One of `+`, `*`, `min`, `max`, `average`. -- ID: `operator` -- Datatype: `string` -- Default Value: `None` - - - - +* ID: `operator` +* Datatype: `string` +* Default Value: `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Numeric/cmem-plugin-number-conversion.md b/docs/build/reference/transformer/Numeric/cmem-plugin-number-conversion.md index b12f8842a..b753089e7 100644 --- a/docs/build/reference/transformer/Numeric/cmem-plugin-number-conversion.md +++ b/docs/build/reference/transformer/Numeric/cmem-plugin-number-conversion.md @@ -29,8 +29,6 @@ Source Number Base - Datatype: `string` - Default Value: `None` - - ### Target Base Source Number Base @@ -39,10 +37,6 @@ Source Number Base - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Numeric/compareNumbers.md b/docs/build/reference/transformer/Numeric/compareNumbers.md index 015830048..470653f60 100644 --- a/docs/build/reference/transformer/Numeric/compareNumbers.md +++ b/docs/build/reference/transformer/Numeric/compareNumbers.md @@ -8,14 +8,11 @@ tags: # Compare numbers - - Compares the 
numbers of two sets. Returns 1 if the comparison yields true and 0 otherwise. If there are multiple numbers in both sets, the comparator must be true for all numbers. For instance, {1,2} < {2,3} yields 0 as not all numbers in the first set are smaller than in the second. - ## Parameter ### Comparator @@ -26,10 +23,6 @@ No description - Datatype: `enumeration` - Default Value: `<` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Numeric/extractPhysicalQuantity.md b/docs/build/reference/transformer/Numeric/extractPhysicalQuantity.md index a35b4977f..39d70530e 100644 --- a/docs/build/reference/transformer/Numeric/extractPhysicalQuantity.md +++ b/docs/build/reference/transformer/Numeric/extractPhysicalQuantity.md @@ -8,14 +8,11 @@ tags: # Extract physical quantity - - Extracts physical quantities, such as length or weight values. Values are expected of the form `{Number}{UnitPrefix}{Symbol}` and are converted to the base unit. Example: Let a value such as `"10km, 3mg"`, containing both a distance and a weight, be given. If the `symbol` parameter is set to `m`, then the extracted value will be `1000` (i.e. the distance). If, instead, the `symbol` parameter is set to `g`, then the extracted value will be `0.001` (i.e. the weight). - ## Parameter ### Symbol @@ -26,8 +23,6 @@ The symbol of the dimension, e.g., 'm' for meter. - Datatype: `string` - Default Value: `None` - - ### Number format The IETF BCP 47 language tag, e.g. 'en'. @@ -36,8 +31,6 @@ The IETF BCP 47 language tag, e.g. 'en'. - Datatype: `string` - Default Value: `en` - - ### Filter Only extracts from values that contain the given regex (case-insensitive). @@ -46,8 +39,6 @@ Only extracts from values that contain the given regex (case-insensitive). - Datatype: `string` - Default Value: `None` - - ### Index If there are multiple matches, retrieve the value with the given index (zero-based). 
@@ -56,10 +47,6 @@ If there are multiple matches, retrieve the value with the given index (zero-bas - Datatype: `int` - Default Value: `0` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Numeric/formatNumber.md b/docs/build/reference/transformer/Numeric/formatNumber.md index 85e65115f..aa2397b05 100644 --- a/docs/build/reference/transformer/Numeric/formatNumber.md +++ b/docs/build/reference/transformer/Numeric/formatNumber.md @@ -8,13 +8,9 @@ tags: # Format number - - - Formats a number according to a user-defined pattern. The pattern syntax is documented at: - https://docs.oracle.com/javase/8/docs/api/java/text/DecimalFormat.html - + ## Examples @@ -31,7 +27,6 @@ tags: * Returns: `[001]` - --- **Example 2:** @@ -43,7 +38,6 @@ tags: * Returns: `[000123.780]` - --- **Example 3:** @@ -55,7 +49,6 @@ tags: * Returns: `[123,456.789]` - --- **Example 4:** @@ -68,7 +61,6 @@ tags: * Returns: `[123.456,789]` - --- **Example 5:** @@ -80,7 +72,6 @@ tags: * Returns: `[10 apples]` - --- **Example 6:** @@ -92,7 +83,6 @@ tags: * Returns: `[0010]` - --- **Example 7:** @@ -104,7 +94,6 @@ tags: * Returns: `[1]` - --- **Example 8:** @@ -116,33 +105,24 @@ tags: * Returns: `[123.4]` - - - ## Parameter ### Pattern No description -- ID: `pattern` -- Datatype: `string` -- Default Value: `None` - - +* ID: `pattern` +* Datatype: `string` +* Default Value: `None` ### Locale No description -- ID: `locale` -- Datatype: `string` -- Default Value: `en` - - - - +* ID: `locale` +* Datatype: `string` +* Default Value: `en` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Numeric/log.md b/docs/build/reference/transformer/Numeric/log.md index 342684e47..9cd328193 100644 --- a/docs/build/reference/transformer/Numeric/log.md +++ b/docs/build/reference/transformer/Numeric/log.md @@ -8,11 +8,8 @@ tags: # Logarithm - - Transforms all numbers by applying the logarithm 
function. Non-numeric values are left unchanged. - ## Parameter ### Base @@ -23,10 +20,6 @@ No description - Datatype: `int` - Default Value: `10` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Numeric/numOperation.md b/docs/build/reference/transformer/Numeric/numOperation.md index 03c9afe3f..245c6e58e 100644 --- a/docs/build/reference/transformer/Numeric/numOperation.md +++ b/docs/build/reference/transformer/Numeric/numOperation.md @@ -8,8 +8,6 @@ tags: # Numeric operation - - The `numOperation` plugin applies one of the four basic arithmetic operators to the sequence of input values. These are the fundamental operations of **addition** (`+`), **subtraction** (`-`), **multiplication** (`*`) and **division** (`/`). @@ -42,7 +40,6 @@ numbers, a validation exception will be raised. * Returns: `[2.0]` - --- **Example 2:** @@ -55,7 +52,6 @@ numbers, a validation exception will be raised. * Returns: `[0.0]` - --- **Example 3:** @@ -68,7 +64,6 @@ numbers, a validation exception will be raised. * Returns: `[30.0]` - --- **Example 4:** @@ -81,7 +76,6 @@ numbers, a validation exception will be raised. * Returns: `[2.5]` - --- **Example 5:** @@ -94,7 +88,6 @@ numbers, a validation exception will be raised. * Returns: `[]` - --- **Example 6:** @@ -107,7 +100,6 @@ numbers, a validation exception will be raised. * Returns: `[1.0]` - --- **Example 7:** @@ -120,7 +112,6 @@ numbers, a validation exception will be raised. * Returns: `[3.0]` - --- **Example 8:** @@ -133,23 +124,16 @@ numbers, a validation exception will be raised. * Returns: `[Infinity]` - - - ## Parameter ### Operator The operator to be applied to all values. 
One of `+`, `-`, `*`, `/` -- ID: `operator` -- Datatype: `string` -- Default Value: `None` - - - - +* ID: `operator` +* Datatype: `string` +* Default Value: `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Numeric/numReduce.md b/docs/build/reference/transformer/Numeric/numReduce.md index 3ed0c3236..7c15ceb1c 100644 --- a/docs/build/reference/transformer/Numeric/numReduce.md +++ b/docs/build/reference/transformer/Numeric/numReduce.md @@ -8,8 +8,6 @@ tags: # Numeric reduce - - Strip all non-numeric characters from a string. ## Examples @@ -27,7 +25,6 @@ Strip all non-numeric characters from a string. * Returns: `[12]` - --- **Example 2:** @@ -39,23 +36,16 @@ Strip all non-numeric characters from a string. * Returns: `[1.2]` - - - ## Parameter ### Keep punctuation No description -- ID: `keepPunctuation` -- Datatype: `boolean` -- Default Value: `true` - - - - +* ID: `keepPunctuation` +* Datatype: `boolean` +* Default Value: `true` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Parser/DateTypeParser.md b/docs/build/reference/transformer/Parser/DateTypeParser.md index d702de51c..67b152178 100644 --- a/docs/build/reference/transformer/Parser/DateTypeParser.md +++ b/docs/build/reference/transformer/Parser/DateTypeParser.md @@ -8,8 +8,6 @@ tags: # Parse date - - Parses and normalizes dates in different formats. ## Examples @@ -28,7 +26,6 @@ Parses and normalizes dates in different formats. * Returns: `[1999-03-20]` - --- **Example 2:** @@ -41,7 +38,6 @@ Parses and normalizes dates in different formats. * Returns: `[20.03.1999]` - --- **Example 3:** @@ -54,7 +50,6 @@ Parses and normalizes dates in different formats. * Returns: `[2017-04-04]` - --- **Example 4:** @@ -67,7 +62,6 @@ Parses and normalizes dates in different formats. * Returns: `[2017-04-04]` - --- **Example 5:** @@ -80,7 +74,6 @@ Parses and normalizes dates in different formats. 
* Returns: `[24-Jun-2021 14:50:05 +02:00]` - --- **Example 6:** @@ -93,7 +86,6 @@ Parses and normalizes dates in different formats. * Returns: `[24-Dez.-2021 14:50:05 +02:00]` - --- **Example 7:** @@ -106,7 +98,6 @@ Parses and normalizes dates in different formats. * Returns: `[1999-03-20T20:34.44]` - --- **Example 8:** @@ -119,7 +110,6 @@ Parses and normalizes dates in different formats. * Returns: `[12:20:00.000]` - --- **Example 9:** @@ -132,7 +122,6 @@ Parses and normalizes dates in different formats. * Returns: `[--01]` - --- **Example 10:** @@ -145,7 +134,6 @@ Parses and normalizes dates in different formats. * Returns: `[---31]` - --- **Example 11:** @@ -158,7 +146,6 @@ Parses and normalizes dates in different formats. * Returns: `[--12-31]` - --- **Example 12:** @@ -172,7 +159,6 @@ Parses and normalizes dates in different formats. * Returns: `[]` * **Throws error:** `DateTimeException` - --- **Example 13:** @@ -185,7 +171,6 @@ Parses and normalizes dates in different formats. * Returns: `[2020-02-22T16:34:14]` - --- **Example 14:** @@ -200,7 +185,6 @@ Parses and normalizes dates in different formats. * Returns: `[24-Dez.-2021 14:50:05 +02:00]` - --- **Example 15:** @@ -215,7 +199,6 @@ Parses and normalizes dates in different formats. * Returns: `[24-Dec-2021 14:50:05 +02:00]` - --- **Example 16:** @@ -232,7 +215,6 @@ Parses and normalizes dates in different formats. * Returns: `[déc. 2021]` - --- **Example 17:** @@ -247,73 +229,56 @@ Parses and normalizes dates in different formats. * Returns: `[Februar, 2024]` - - - ## Parameter ### Input format The input date/time format used for parsing the date/time string. -- ID: `inputDateFormatId` -- Datatype: `option[enumeration]` -- Default Value: `w3c Date` - - +* ID: `inputDateFormatId` +* Datatype: `option[enumeration]` +* Default Value: `w3c Date` ### Alternative input format An input format string that should be used instead of the selected input format. Java DateFormat string. 
-- ID: `alternativeInputFormat` -- Datatype: `string` -- Default Value: `None` - - +* ID: `alternativeInputFormat` +* Datatype: `string` +* Default Value: `None` ### Alternative input locale Optional locale for the (alternative) input format. If not set the system's locale will be used or the locale of the input format, if set. -- ID: `inputLocale` -- Datatype: `option[locale]` -- Default Value: `None` - - +* ID: `inputLocale` +* Datatype: `option[locale]` +* Default Value: `None` ### Output format The output date/time format used for parsing the date/time string. -- ID: `outputDateFormatId` -- Datatype: `option[enumeration]` -- Default Value: `w3c Date` - - +* ID: `outputDateFormatId` +* Datatype: `option[enumeration]` +* Default Value: `w3c Date` ### Alternative output format An output format string that should be used instead of the selected output format. Java DateFormat string. -- ID: `alternativeOutputFormat` -- Datatype: `string` -- Default Value: `None` - - +* ID: `alternativeOutputFormat` +* Datatype: `string` +* Default Value: `None` ### Alternative output locale Optional locale for the (alternative) output format. If not set the system's locale will be used or the locale of the output format, if set. -- ID: `outputLocale` -- Datatype: `option[locale]` -- Default Value: `None` - - - - +* ID: `outputLocale` +* Datatype: `option[locale]` +* Default Value: `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Parser/FloatTypeParser.md b/docs/build/reference/transformer/Parser/FloatTypeParser.md index da57a8591..2e47c616c 100644 --- a/docs/build/reference/transformer/Parser/FloatTypeParser.md +++ b/docs/build/reference/transformer/Parser/FloatTypeParser.md @@ -8,11 +8,8 @@ tags: # Parse float - - Parses and normalizes float values. 
- ## Parameter ### Comma as decimal point @@ -23,8 +20,6 @@ No description - Datatype: `boolean` - Default Value: `false` - - ### Thousand separator No description @@ -33,8 +28,6 @@ No description - Datatype: `boolean` - Default Value: `false` - - ### Brackets for negative No description @@ -43,10 +36,6 @@ No description - Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Parser/GeoCoordinateParser.md b/docs/build/reference/transformer/Parser/GeoCoordinateParser.md index 52bd11745..fc3ecc2f7 100644 --- a/docs/build/reference/transformer/Parser/GeoCoordinateParser.md +++ b/docs/build/reference/transformer/Parser/GeoCoordinateParser.md @@ -8,15 +8,12 @@ tags: # Parse geo coordinate - - Parses and normalizes geo coordinates. - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Parser/GeoLocationParser.md b/docs/build/reference/transformer/Parser/GeoLocationParser.md index cb89bc1e8..0cd11cfa7 100644 --- a/docs/build/reference/transformer/Parser/GeoLocationParser.md +++ b/docs/build/reference/transformer/Parser/GeoLocationParser.md @@ -8,11 +8,8 @@ tags: # Parse geo location - - Parses and normalizes geo locations like continents, countries, states and cities. - ## Parameter ### Parse type id @@ -23,8 +20,6 @@ What type of location should be parsed. - Datatype: `enumeration` - Default Value: `None` - - ### Full state name Set to true if the full state name should be output instead of the 2-letter code. 
@@ -33,10 +28,6 @@ Set to true if the full state name should be output instead of the 2-letter code - Datatype: `boolean` - Default Value: `true` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Parser/IntegerParser.md b/docs/build/reference/transformer/Parser/IntegerParser.md index 63f6d8907..7f37f0098 100644 --- a/docs/build/reference/transformer/Parser/IntegerParser.md +++ b/docs/build/reference/transformer/Parser/IntegerParser.md @@ -8,8 +8,6 @@ tags: # Parse integer - - Parses integer values. ## Examples @@ -28,7 +26,6 @@ Parses integer values. * Returns: `[1000]` - --- **Example 2:** @@ -41,7 +38,6 @@ Parses integer values. * Returns: `[1000]` - --- **Example 3:** @@ -54,7 +50,6 @@ Parses integer values. * Returns: `[1000]` - --- **Example 4:** @@ -67,33 +62,24 @@ Parses integer values. * Returns: `[1000]` - - - ## Parameter ### Comma as decimal point Use comma or point (default) as a decimal separator. -- ID: `commaAsDecimalPoint` -- Datatype: `boolean` -- Default Value: `false` - - +* ID: `commaAsDecimalPoint` +* Datatype: `boolean` +* Default Value: `false` ### Thousand separator Presence of a thousands separator (default: absence), compatible with the chosen decimal separator. -- ID: `thousandSeparator` -- Datatype: `boolean` -- Default Value: `false` - - - - +* ID: `thousandSeparator` +* Datatype: `boolean` +* Default Value: `false` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Parser/IsinParser.md b/docs/build/reference/transformer/Parser/IsinParser.md index 8ce1a4e43..40c264b65 100644 --- a/docs/build/reference/transformer/Parser/IsinParser.md +++ b/docs/build/reference/transformer/Parser/IsinParser.md @@ -8,15 +8,12 @@ tags: # Parse ISIN - - Parses International Securities Identification Numbers (ISIN) values and fails if the String is no valid ISIN. 
- ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Parser/SkosTypeParser.md b/docs/build/reference/transformer/Parser/SkosTypeParser.md index d8f7b29b8..658bd36bd 100644 --- a/docs/build/reference/transformer/Parser/SkosTypeParser.md +++ b/docs/build/reference/transformer/Parser/SkosTypeParser.md @@ -8,11 +8,8 @@ tags: # Parse SKOS term - - Parses values from a SKOS ontology. - ## Parameter ### Surface form to representation mapping @@ -23,10 +20,6 @@ No description - Datatype: `stringmap` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Parser/StringParser.md b/docs/build/reference/transformer/Parser/StringParser.md index 19a76ee46..df6c8f0f6 100644 --- a/docs/build/reference/transformer/Parser/StringParser.md +++ b/docs/build/reference/transformer/Parser/StringParser.md @@ -8,8 +8,6 @@ tags: # Parse string - - Parses string values. This is basically an identity function. ## Examples @@ -24,7 +22,6 @@ Parses string values. This is basically an identity function. * Returns: `[eccenca]` - --- **Example 2:** @@ -33,13 +30,10 @@ Parses string values. This is basically an identity function. * Returns: `[these, strings, won't, be, changed, at, all]` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Replace/excelMap.md b/docs/build/reference/transformer/Replace/excelMap.md index b5feed11c..2d2629b09 100644 --- a/docs/build/reference/transformer/Replace/excelMap.md +++ b/docs/build/reference/transformer/Replace/excelMap.md @@ -8,8 +8,6 @@ tags: # Excel map - - Replaces values based on a map of values read from a file in Open XML format (XLSX). 
The XLSX file may contain several sheets of the form: @@ -22,8 +20,6 @@ An empty string can be created in Excel and alternatives by inserting `=""` in t If there are multiple values for a single key, all values will be returned for the given key. Note that the mapping table will be cached in memory. If the Excel file is updated (even while transforming), the map will be reloaded within seconds. - - ## Parameter @@ -35,8 +31,6 @@ Excel file inside the resources directory containing one or more sheets with map - Datatype: `resource` - Default Value: `None` - - ### Sheet name The sheet that contains the mapping table or empty if the first sheet should be taken. @@ -45,8 +39,6 @@ The sheet that contains the mapping table or empty if the first sheet should be - Datatype: `string` - Default Value: `None` - - ### Skip lines How many rows to skip before reading the mapping table. By default the expected header row is skipped. @@ -55,8 +47,6 @@ How many rows to skip before reading the mapping table. By default the expected - Datatype: `int` - Default Value: `1` - - ### Strict If set to true, the operator throws validation errors for values it cannot map. If set to false, the chosen conflict strategy will be applied for missing values. @@ -65,8 +55,6 @@ If set to true, the operator throws validation errors for values it cannot map. - Datatype: `boolean` - Default Value: `true` - - ### Conflict strategy Determines how values that cannot be found in the mapping table are treated. Only has an effect if 'strict' is set to false. If 'retain' is chosen, the original value will be forwarded. If 'remove' is chosen, no value will be output. @@ -75,10 +63,6 @@ Determines how values that cannot be found in the mapping table are treated. 
Onl - Datatype: `enumeration` - Default Value: `retain` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Replace/map.md b/docs/build/reference/transformer/Replace/map.md index 7ee0d0a5b..f71f0837d 100644 --- a/docs/build/reference/transformer/Replace/map.md +++ b/docs/build/reference/transformer/Replace/map.md @@ -8,8 +8,6 @@ tags: # Map - - Replaces values based on a map of values. ## Examples @@ -28,7 +26,6 @@ Replaces values based on a map of values. * Returns: `[Value1]` - --- **Example 2:** @@ -41,33 +38,24 @@ Replaces values based on a map of values. * Returns: `[Undefined]` - - - ## Parameter ### Map A map of values -- ID: `map` -- Datatype: `stringmap` -- Default Value: `None` - - +* ID: `map` +* Datatype: `stringmap` +* Default Value: `None` ### Default Default if the map defines no value -- ID: `default` -- Datatype: `string` -- Default Value: `None` - - - - +* ID: `default` +* Datatype: `string` +* Default Value: `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Replace/mapWithDefaultInput.md b/docs/build/reference/transformer/Replace/mapWithDefaultInput.md index c8c818dd4..bdd85dd03 100644 --- a/docs/build/reference/transformer/Replace/mapWithDefaultInput.md +++ b/docs/build/reference/transformer/Replace/mapWithDefaultInput.md @@ -8,34 +8,28 @@ tags: # Map with default - - This transformer requires a _map of values_, when created. This can be a map such as `"A:1,B:2,C:3"`, representing the mapping between the first three letters and the corresponding numbers (i.e. `A` to `1`, `B` to `2` and `C` to `3`). The transformer requires _two_ input value sequences, when called: the first sequence of values are the _values to map_, and the second is a sequence of _default values_. 
With these parameterization and applied value sequences, the transformer then works in the following way: + * The _map of values_ (specified when the transformer is _created_) is used for _obtaining_ values from the transformer. * The _values to map_ (specified when the transformer is _called_) is used for _mapping_ values by the transformer. * The _default values_ (specified when the transformer is _called_, as a mandatory second argument) is used as a backup sequence of values, in case the (first) value to map is not found within the map of values. It is simply a default. Normally, the sequence of _default values_ is expected to have the same size as the _values to map_ (i.e. the two sequences provided when _calling_ the transformer are supposed to be compatible). Additionally, in order to provide a certain amount of flexibility: Should that _not_ be the case, if there are _less_ default values than values to map, the _last_ default value is replicated to match the count. This fallback shouldn't be relied upon, since it may result in a somewhat confusing or unexpected behavior. - ## Parameter ### Map A map of values -- ID: `map` -- Datatype: `stringmap` -- Default Value: `None` - - - - +* ID: `map` +* Datatype: `stringmap` +* Default Value: `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Replace/regexReplace.md b/docs/build/reference/transformer/Replace/regexReplace.md index d1261e1b0..a8ef1a75a 100644 --- a/docs/build/reference/transformer/Replace/regexReplace.md +++ b/docs/build/reference/transformer/Replace/regexReplace.md @@ -8,8 +8,6 @@ tags: # Regex replace - - ## Description The `regexReplace` plugin replaces all occurrences of a regular expression. @@ -61,7 +59,6 @@ _extracts_ all occurrences of the matching. * Returns: `[012]` - --- **Removes all letters by replacing them with an empty string:** @@ -73,7 +70,6 @@ _extracts_ all occurrences of the matching. 
* Returns: `[1]` - --- **Removes all vowels by replacing them with an empty string:** @@ -85,7 +81,6 @@ _extracts_ all occurrences of the matching. * Returns: `[Dwln, Bln, Kl, Fl, Dr, Nr, r, n, Gln, Bfr, Bfr, Bmbr, Thrn]` - --- **Removes all consonants by replacing them with an empty string:** @@ -97,7 +92,6 @@ _extracts_ all occurrences of the matching. * Returns: `[ai, ai, ii, ii, oi, oi, Oi, Oi, oi, iu, ou, ou, oi]` - --- **Replaces all vowels with a common vowel:** @@ -110,7 +104,6 @@ _extracts_ all occurrences of the matching. * Returns: `[Dwalan, Balan, Kala, Fala, Dara, Nara, ara, aan, Glaan, Bafar, Bafar, Bambar, Tharan]` - --- **Replaces all vowels with a common double vowel:** @@ -123,33 +116,24 @@ _extracts_ all occurrences of the matching. * Returns: `[Dwaalaan, Baalaan, Kaalaa, Faalaa, Daaraa, Naaraa, aaraa, aaaan, Glaaaan, Baafaar, Baafaar, Baambaar, Thaaraan]` - - - ## Parameter ### Regex The regular expression to match -- ID: `regex` -- Datatype: `string` -- Default Value: `None` - - +* ID: `regex` +* Datatype: `string` +* Default Value: `None` ### Replace The replacement of each match -- ID: `replace` -- Datatype: `string` -- Default Value: `None` - - - - +* ID: `replace` +* Datatype: `string` +* Default Value: `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Replace/replace.md b/docs/build/reference/transformer/Replace/replace.md index 4dd13c6b4..492f995bf 100644 --- a/docs/build/reference/transformer/Replace/replace.md +++ b/docs/build/reference/transformer/Replace/replace.md @@ -8,8 +8,6 @@ tags: # Replace - - Replace all occurrences of a string with another string. ## Examples @@ -28,7 +26,6 @@ Replace all occurrences of a string with another string. * Returns: `[abc]` - --- **Example 2:** @@ -41,33 +38,24 @@ Replace all occurrences of a string with another string. 
* Returns: `[def]` - - - ## Parameter ### Search The string to search for -- ID: `search` -- Datatype: `string` -- Default Value: `None` - - +* ID: `search` +* Datatype: `string` +* Default Value: `None` ### Replace The replacement of each match -- ID: `replace` -- Datatype: `string` -- Default Value: `None` - - - - +* ID: `replace` +* Datatype: `string` +* Default Value: `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Selection/coalesce.md b/docs/build/reference/transformer/Selection/coalesce.md index 1f648a09a..87e7bf5f8 100644 --- a/docs/build/reference/transformer/Selection/coalesce.md +++ b/docs/build/reference/transformer/Selection/coalesce.md @@ -8,8 +8,6 @@ tags: # Coalesce (first non-empty input) - - Forwards the first non-empty input, i.e. for which any value(s) exist. A single empty string is considered a value. ## Examples @@ -26,7 +24,6 @@ Forwards the first non-empty input, i.e. for which any value(s) exist. A single * Returns: `[]` - --- **Example 2:** @@ -36,13 +33,11 @@ Forwards the first non-empty input, i.e. for which any value(s) exist. A single * Returns: `[]` - --- **Example 3:** * Returns: `[]` - --- **Example 4:** @@ -53,7 +48,6 @@ Forwards the first non-empty input, i.e. for which any value(s) exist. A single * Returns: `[first]` - --- **Example 5:** @@ -64,7 +58,6 @@ Forwards the first non-empty input, i.e. for which any value(s) exist. A single * Returns: `[first A, first B]` - --- **Example 6:** @@ -74,13 +67,10 @@ Forwards the first non-empty input, i.e. for which any value(s) exist. 
A single * Returns: `[first]` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Selection/regexSelect.md b/docs/build/reference/transformer/Selection/regexSelect.md index e32449669..13386fdb7 100644 --- a/docs/build/reference/transformer/Selection/regexSelect.md +++ b/docs/build/reference/transformer/Selection/regexSelect.md @@ -8,8 +8,6 @@ tags: # Regex selection - - ## Description of the plugin This transformer takes _three_ inputs: a single _output value_, a sequence of _regular expressions_ and a sequence of @@ -69,7 +67,6 @@ take, `validateRegex` is used for _validating_ the input, `regexReplace` _replac * Returns: `[output, , output]` - --- **return only first match position if oneOnly = true:** @@ -83,23 +80,16 @@ take, `validateRegex` is used for _validating_ the input, `regexReplace` _replac * Returns: `[output, , ]` - - - ## Parameter ### One only No description -- ID: `oneOnly` -- Datatype: `boolean` -- Default Value: `false` - - - - +* ID: `oneOnly` +* Datatype: `boolean` +* Default Value: `false` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Sequence/count.md b/docs/build/reference/transformer/Sequence/count.md index 944eb88d9..985d51a4b 100644 --- a/docs/build/reference/transformer/Sequence/count.md +++ b/docs/build/reference/transformer/Sequence/count.md @@ -8,8 +8,6 @@ tags: # Count values - - Counts the number of values. ## Examples @@ -24,7 +22,6 @@ Counts the number of values. * Returns: `[1]` - --- **Example 2:** @@ -33,13 +30,10 @@ Counts the number of values. 
* Returns: `[2]` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Sequence/getValueByIndex.md b/docs/build/reference/transformer/Sequence/getValueByIndex.md index 5e122f0fe..ff3fa67ec 100644 --- a/docs/build/reference/transformer/Sequence/getValueByIndex.md +++ b/docs/build/reference/transformer/Sequence/getValueByIndex.md @@ -8,8 +8,6 @@ tags: # Get value by index - - Returns the value found at the specified index. Fails or returns an empty result depending on failIfNoFound is set or not. Please be aware that this will work only if the data source supports some kind of ordering like XML or JSON. This is probably not a good idea to do with RDF models. @@ -17,7 +15,6 @@ Returns the value found at the specified index. Fails or returns an empty result If emptyStringToEmptyResult is true then instead of a result with an empty String, an empty result is returned. - ## Parameter ### Index @@ -28,8 +25,6 @@ No description - Datatype: `int` - Default Value: `None` - - ### Fail if not found No description @@ -38,8 +33,6 @@ No description - Datatype: `boolean` - Default Value: `false` - - ### Empty string to empty result No description @@ -48,10 +41,6 @@ No description - Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Sequence/sort.md b/docs/build/reference/transformer/Sequence/sort.md index ed352f5db..2eb402732 100644 --- a/docs/build/reference/transformer/Sequence/sort.md +++ b/docs/build/reference/transformer/Sequence/sort.md @@ -8,8 +8,6 @@ tags: # Sort - - Sorts values lexicographically. ## Examples @@ -24,7 +22,6 @@ Sorts values lexicographically. * Returns: `[]` - --- **Example 2:** @@ -33,7 +30,6 @@ Sorts values lexicographically. * Returns: `[a, b, c]` - --- **Example 3:** @@ -42,13 +38,10 @@ Sorts values lexicographically. 
* Returns: `[Hamburg, Hans, Hansa]` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Sequence/toSequenceIndex.md b/docs/build/reference/transformer/Sequence/toSequenceIndex.md index e83a72a9d..a5567d215 100644 --- a/docs/build/reference/transformer/Sequence/toSequenceIndex.md +++ b/docs/build/reference/transformer/Sequence/toSequenceIndex.md @@ -8,8 +8,6 @@ tags: # Sequence values to indexes - - Transforms the sequence of values to their respective indexes in the sequence. If there is more than one input, the values are numbered from the first input on and continued for the next inputs. Applied against an RDF source the order might not be deterministic. ## Examples @@ -24,13 +22,10 @@ Transforms the sequence of values to their respective indexes in the sequence. I * Returns: `[0, 1, 2]` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Substring/stripPostfix.md b/docs/build/reference/transformer/Substring/stripPostfix.md index fad943971..da38d0400 100644 --- a/docs/build/reference/transformer/Substring/stripPostfix.md +++ b/docs/build/reference/transformer/Substring/stripPostfix.md @@ -8,8 +8,6 @@ tags: # Strip postfix - - Strips a postfix of a string. ## Examples @@ -27,7 +25,6 @@ Strips a postfix of a string. * Returns: `[value]` - --- **Example 2:** @@ -39,23 +36,16 @@ Strips a postfix of a string. 
* Returns: `[Value]` - - - ## Parameter ### Postfix No description -- ID: `postfix` -- Datatype: `string` -- Default Value: `None` - - - - +* ID: `postfix` +* Datatype: `string` +* Default Value: `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Substring/stripPrefix.md b/docs/build/reference/transformer/Substring/stripPrefix.md index 14a030d3d..96a808c77 100644 --- a/docs/build/reference/transformer/Substring/stripPrefix.md +++ b/docs/build/reference/transformer/Substring/stripPrefix.md @@ -8,8 +8,6 @@ tags: # Strip prefix - - Strips a prefix of a string. ## Examples @@ -27,7 +25,6 @@ Strips a prefix of a string. * Returns: `[Value]` - --- **Example 2:** @@ -39,23 +36,16 @@ Strips a prefix of a string. * Returns: `[ValueWithoutPrefix]` - - - ## Parameter ### Prefix No description -- ID: `prefix` -- Datatype: `string` -- Default Value: `None` - - - - +* ID: `prefix` +* Datatype: `string` +* Default Value: `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Substring/stripUriPrefix.md b/docs/build/reference/transformer/Substring/stripUriPrefix.md index 45327169c..b24361010 100644 --- a/docs/build/reference/transformer/Substring/stripUriPrefix.md +++ b/docs/build/reference/transformer/Substring/stripUriPrefix.md @@ -8,8 +8,6 @@ tags: # Strip URI prefix - - Strips the URI prefix and decodes the remainder based on UTF-8 URL decoding (using java.net.URLDecoder). Leaves values unchanged which are not a valid URI. 
## Examples @@ -24,7 +22,6 @@ Strips the URI prefix and decodes the remainder based on UTF-8 URL decoding (usi * Returns: `[value]` - --- **Example 2:** @@ -33,7 +30,6 @@ Strips the URI prefix and decodes the remainder based on UTF-8 URL decoding (usi * Returns: `[value]` - --- **Example 3:** @@ -42,7 +38,6 @@ Strips the URI prefix and decodes the remainder based on UTF-8 URL decoding (usi * Returns: `[encoded välue]` - --- **Example 4:** @@ -51,7 +46,6 @@ Strips the URI prefix and decodes the remainder based on UTF-8 URL decoding (usi * Returns: `[value]` - --- **Example 5:** @@ -60,7 +54,6 @@ Strips the URI prefix and decodes the remainder based on UTF-8 URL decoding (usi * Returns: `[Two words]` - --- **Example 6:** @@ -72,23 +65,16 @@ Strips the URI prefix and decodes the remainder based on UTF-8 URL decoding (usi * Returns: `[Two_words]` - - - ## Parameter ### Decode underscores to spaces If true, underscores will be decoded to spaces. -- ID: `decodeUnderscoresToSpaces` -- Datatype: `boolean` -- Default Value: `true` - - - - +* ID: `decodeUnderscoresToSpaces` +* Datatype: `boolean` +* Default Value: `true` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Substring/substring.md b/docs/build/reference/transformer/Substring/substring.md index ce966705f..d2f6e630e 100644 --- a/docs/build/reference/transformer/Substring/substring.md +++ b/docs/build/reference/transformer/Substring/substring.md @@ -8,8 +8,6 @@ tags: # Substring - - Returns a substring between 'beginIndex' (inclusive) and 'endIndex' (exclusive). If 'endIndex' is 0 (default), it is ignored and the entire remaining string starting with 'beginIndex' is returned. If 'endIndex' is negative, -endIndex characters are removed from the end. ## Examples @@ -28,7 +26,6 @@ Returns a substring between 'beginIndex' (inclusive) and 'endIndex' (exclusive). 
* Returns: `[a]` - --- **Example 2:** @@ -41,7 +38,6 @@ Returns a substring between 'beginIndex' (inclusive) and 'endIndex' (exclusive). * Returns: `[c]` - --- **Example 3:** @@ -54,7 +50,6 @@ Returns a substring between 'beginIndex' (inclusive) and 'endIndex' (exclusive). * Returns: `[]` - --- **Example 4:** @@ -68,7 +63,6 @@ Returns a substring between 'beginIndex' (inclusive) and 'endIndex' (exclusive). * Returns: `[c]` * **Throws error:** `ValidationException` - --- **Example 5:** @@ -82,7 +76,6 @@ Returns a substring between 'beginIndex' (inclusive) and 'endIndex' (exclusive). * Returns: `[c]` - --- **Example 6:** @@ -96,7 +89,6 @@ Returns a substring between 'beginIndex' (inclusive) and 'endIndex' (exclusive). * Returns: `[]` - --- **Example 7:** @@ -109,7 +101,6 @@ Returns a substring between 'beginIndex' (inclusive) and 'endIndex' (exclusive). * Returns: `[ab]` - --- **Example 8:** @@ -122,43 +113,32 @@ Returns a substring between 'beginIndex' (inclusive) and 'endIndex' (exclusive). * Returns: `[bc]` - - - ## Parameter ### Begin index The beginning index, inclusive. -- ID: `beginIndex` -- Datatype: `int` -- Default Value: `0` - - +* ID: `beginIndex` +* Datatype: `int` +* Default Value: `0` ### End index The end index, exclusive. Ignored if set to 0, i.e., the entire remaining string starting with 'beginIndex' is returned. If negative, -endIndex characters are removed from the end -- ID: `endIndex` -- Datatype: `int` -- Default Value: `0` - - +* ID: `endIndex` +* Datatype: `int` +* Default Value: `0` ### String must be in range If true, only strings will be accepted that are within the start and end indices, throwing a validating error if an index is out of range. 
-- ID: `stringMustBeInRange` -- Datatype: `boolean` -- Default Value: `true` - - - - +* ID: `stringMustBeInRange` +* Datatype: `boolean` +* Default Value: `true` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Substring/untilCharacter.md b/docs/build/reference/transformer/Substring/untilCharacter.md index 276bfd745..2ed6f9930 100644 --- a/docs/build/reference/transformer/Substring/untilCharacter.md +++ b/docs/build/reference/transformer/Substring/untilCharacter.md @@ -8,8 +8,6 @@ tags: # Until character - - Extracts the substring until the character given. ## Examples @@ -27,7 +25,6 @@ Extracts the substring until the character given. * Returns: `[ab]` - --- **Example 2:** @@ -39,23 +36,16 @@ Extracts the substring until the character given. * Returns: `[abab]` - - - ## Parameter ### Until character No description -- ID: `untilCharacter` -- Datatype: `char` -- Default Value: `None` - - - - +* ID: `untilCharacter` +* Datatype: `char` +* Default Value: `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Template/TemplateTransformer.md b/docs/build/reference/transformer/Template/TemplateTransformer.md index dd6111b6e..1354b7446 100644 --- a/docs/build/reference/transformer/Template/TemplateTransformer.md +++ b/docs/build/reference/transformer/Template/TemplateTransformer.md @@ -8,8 +8,6 @@ tags: # Evaluate template - - Evaluates a template. Input values can be addressed using the variables 'input1', 'input2', etc. Global variables are available in the 'global' scope, e.g., 'global.myVar'. ## Examples @@ -20,7 +18,8 @@ Evaluates a template. Input values can be addressed using the variables 'input1' **Example 1:** * Parameters - * template: + * template: + ``` Hello {{input1}} {{input2}}, @@ -31,14 +30,14 @@ Evaluates a template. Input values can be addressed using the variables 'input1' 1. `[John]` 2. 
`[Doe]` -* Returns: +* Returns: + ``` [Hello John Doe, How are you today?] ``` - --- **Example 2:** @@ -52,7 +51,6 @@ Evaluates a template. Input values can be addressed using the variables 'input1' * Returns: `[]` * **Throws error:** `ValidationException` - --- **Example 3:** @@ -62,7 +60,6 @@ Evaluates a template. Input values can be addressed using the variables 'input1' * Returns: `[]` * **Throws error:** `ValidationException` - --- **Example 4:** @@ -72,7 +69,6 @@ Evaluates a template. Input values can be addressed using the variables 'input1' * Returns: `[]` * **Throws error:** `UnboundVariablesException` - --- **Example 5:** @@ -84,7 +80,6 @@ Evaluates a template. Input values can be addressed using the variables 'input1' * Returns: `[Hello AB]` - --- **Example 6:** @@ -96,33 +91,24 @@ Evaluates a template. Input values can be addressed using the variables 'input1' * Returns: `[Hello Bob, Eve, how are you doing?]` - - - ## Parameter ### Template The template -- ID: `template` -- Datatype: `template` -- Default Value: `None` - - +* ID: `template` +* Datatype: `template` +* Default Value: `None` ### Language The template language. Currently, Jinja is supported. -- ID: `language` -- Datatype: `string` -- Default Value: `jinja` - - - - +* ID: `language` +* Datatype: `string` +* Default Value: `jinja` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Tokenization/camelcasetokenizer.md b/docs/build/reference/transformer/Tokenization/camelcasetokenizer.md index bfd67cf2e..2a6abb972 100644 --- a/docs/build/reference/transformer/Tokenization/camelcasetokenizer.md +++ b/docs/build/reference/transformer/Tokenization/camelcasetokenizer.md @@ -8,8 +8,6 @@ tags: # Camel case tokenizer - - Tokenizes a camel case string. That is it splits strings between a lower case character and an upper case character. ## Examples @@ -24,7 +22,6 @@ Tokenizes a camel case string. 
That is it splits strings between a lower case ch * Returns: `[camel, Case, String]` - --- **Example 2:** @@ -33,13 +30,10 @@ Tokenizes a camel case string. That is it splits strings between a lower case ch * Returns: `[nocamelcase]` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Tokenization/tokenize.md b/docs/build/reference/transformer/Tokenization/tokenize.md index 61cad6aae..e3deed86e 100644 --- a/docs/build/reference/transformer/Tokenization/tokenize.md +++ b/docs/build/reference/transformer/Tokenization/tokenize.md @@ -8,8 +8,6 @@ tags: # Tokenize - - Tokenizes all input values. ## Examples @@ -24,7 +22,6 @@ Tokenizes all input values. * Returns: `[Hello, World]` - --- **Optionally, splits values at the provided regex:** @@ -36,23 +33,16 @@ Tokenizes all input values. * Returns: `[.175, .050]` - - - ## Parameter ### Regex The regular expression used to split values. -- ID: `regex` -- Datatype: `string` -- Default Value: `\s` - - - - +* ID: `regex` +* Datatype: `string` +* Default Value: `\s` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Uncategorized/cmem-plugin-jq-transform.md b/docs/build/reference/transformer/Uncategorized/cmem-plugin-jq-transform.md index 395438520..732cd688a 100644 --- a/docs/build/reference/transformer/Uncategorized/cmem-plugin-jq-transform.md +++ b/docs/build/reference/transformer/Uncategorized/cmem-plugin-jq-transform.md @@ -15,14 +15,13 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - > [jq](https://jqlang.org/) is like sed for JSON data - you can use it to > slice and filter and map and transform structured data with the same ease that sed, awk, > grep and friends let you play with text. In order to test jq expressions, you can use [play.jqlang.org](https://play.jqlang.org/). 
-## Basic concepts: +## Basic concepts - Filters separated by a comma will produce multiple independent outputs: `,` - Will ignores error if the type is unexpected: `?` @@ -74,8 +73,6 @@ Types can be `arrays`, `objects`, `iterables`, `booleans`, `numbers`, `normals`, - Remove duplicates: `unique` or `unique_by(.foo)` or `unique_by(length)` - Reverse an array: `reverse` - - ## Parameter ### jq Expression @@ -86,19 +83,10 @@ The jq program to apply to the input JSON string. - Datatype: `string` - Default Value: `.` - - - - ## Advanced Parameter ### Output list with one item as string - - - ID: `single_item_as_string` - Datatype: `boolean` - Default Value: `true` - - - diff --git a/docs/build/reference/transformer/Uncategorized/cmem_plugin_currencies-transform.md b/docs/build/reference/transformer/Uncategorized/cmem_plugin_currencies-transform.md index 5074ae215..bb7dadb96 100644 --- a/docs/build/reference/transformer/Uncategorized/cmem_plugin_currencies-transform.md +++ b/docs/build/reference/transformer/Uncategorized/cmem_plugin_currencies-transform.md @@ -15,7 +15,6 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - This transform plugin allows you to convert currencies from one currency to another. It uses the Euro foreign exchange reference rates from the [European Central Bank](https://www.ecb.europa.eu/stats/policy_and_exchange_rates/euro_reference_exchange_rates/html/index.en.html) @@ -52,7 +51,6 @@ can be used with the plugin. Please be aware that not all of the rates are available for all dates (e.g. after 2022-03-01 there is no RUB rate available anymore). - ## Parameter ### 1. Source Currency @@ -63,8 +61,6 @@ The currency code you want to convert from (e.g. USD). - Datatype: `string` - Default Value: `USD` - - ### 2. Date Set date (e.g.YYYY-MM-DD) to convert currencies based on historic rates. @@ -73,8 +69,6 @@ Set date (e.g.YYYY-MM-DD) to convert currencies based on historic rates. 
- Datatype: `string` - Default Value: `2025-11-26` - - ### 3. Target Currency Enter the currency code you want to convert to (e.g.USD). @@ -83,10 +77,6 @@ Enter the currency code you want to convert to (e.g.USD). - Datatype: `string` - Default Value: `EUR` - - - - ## Advanced Parameter ### Debug Output @@ -96,6 +86,3 @@ Instead of plain values, output additional background information. - ID: `debug` - Datatype: `boolean` - Default Value: `false` - - - diff --git a/docs/build/reference/transformer/Validation/validateDateAfter.md b/docs/build/reference/transformer/Validation/validateDateAfter.md index 71e8a995b..4d49ee3ef 100644 --- a/docs/build/reference/transformer/Validation/validateDateAfter.md +++ b/docs/build/reference/transformer/Validation/validateDateAfter.md @@ -8,8 +8,6 @@ tags: # Validate date after - - Validates if the first input date is after the second input date. Outputs the first input if the validation is successful. ## Examples @@ -26,7 +24,6 @@ Validates if the first input date is after the second input date. Outputs the fi * Returns: `[]` * **Throws error:** `ValidationException` - --- **Example 2:** @@ -36,7 +33,6 @@ Validates if the first input date is after the second input date. Outputs the fi * Returns: `[2015-04-04]` - --- **Example 3:** @@ -49,7 +45,6 @@ Validates if the first input date is after the second input date. Outputs the fi * Returns: `[2015-04-03]` - --- **Example 4:** @@ -63,23 +58,16 @@ Validates if the first input date is after the second input date. Outputs the fi * Returns: `[]` * **Throws error:** `ValidationException` - - - ## Parameter ### Allow equal Allow both dates to be equal. 
-- ID: `allowEqual` -- Datatype: `boolean` -- Default Value: `false` - - - - +* ID: `allowEqual` +* Datatype: `boolean` +* Default Value: `false` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Validation/validateDateRange.md b/docs/build/reference/transformer/Validation/validateDateRange.md index 3230371fc..c34db406e 100644 --- a/docs/build/reference/transformer/Validation/validateDateRange.md +++ b/docs/build/reference/transformer/Validation/validateDateRange.md @@ -8,11 +8,8 @@ tags: # Validate date range - - Validates if dates are within a specified range. - ## Parameter ### Min date @@ -23,8 +20,6 @@ Earliest allowed date in YYYY-MM-DD - Datatype: `string` - Default Value: `None` - - ### Max date Latest allowed data in YYYY-MM-DD @@ -33,10 +28,6 @@ Latest allowed data in YYYY-MM-DD - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Validation/validateNumberOfValues.md b/docs/build/reference/transformer/Validation/validateNumberOfValues.md index eab407033..bf7c404af 100644 --- a/docs/build/reference/transformer/Validation/validateNumberOfValues.md +++ b/docs/build/reference/transformer/Validation/validateNumberOfValues.md @@ -8,8 +8,6 @@ tags: # Validate number of values - - Validates that the number of values lies in a specified range. ## Examples @@ -28,7 +26,6 @@ Validates that the number of values lies in a specified range. * Returns: `[value1]` - --- **Example 2:** @@ -42,33 +39,24 @@ Validates that the number of values lies in a specified range. 
* Returns: `[]` * **Throws error:** `ValidationException` - - - ## Parameter ### Min Minimum allowed number of values -- ID: `min` -- Datatype: `int` -- Default Value: `0` - - +* ID: `min` +* Datatype: `int` +* Default Value: `0` ### Max Maximum allowed number of values -- ID: `max` -- Datatype: `int` -- Default Value: `1` - - - - +* ID: `max` +* Datatype: `int` +* Default Value: `1` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Validation/validateNumericRange.md b/docs/build/reference/transformer/Validation/validateNumericRange.md index 6d21e6720..177009089 100644 --- a/docs/build/reference/transformer/Validation/validateNumericRange.md +++ b/docs/build/reference/transformer/Validation/validateNumericRange.md @@ -8,11 +8,8 @@ tags: # Validate numeric range - - Validates if a number is within a specified range. - ## Parameter ### Min @@ -23,8 +20,6 @@ Minimum allowed number - Datatype: `double` - Default Value: `None` - - ### Max Maximum allowed number @@ -33,10 +28,6 @@ Maximum allowed number - Datatype: `double` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Validation/validateRegex.md b/docs/build/reference/transformer/Validation/validateRegex.md index 71034db03..3b7495118 100644 --- a/docs/build/reference/transformer/Validation/validateRegex.md +++ b/docs/build/reference/transformer/Validation/validateRegex.md @@ -8,8 +8,6 @@ tags: # Validate regex - - ## Description The `validateRegex` plugin validates whether all values match a given regular expression. @@ -61,7 +59,6 @@ _extracts_ them. * Returns: `[TestValue123]` - --- **Example 2:** @@ -73,7 +70,6 @@ _extracts_ them. * Returns: `[abcd]` - --- **Example 3:** @@ -85,7 +81,6 @@ _extracts_ them. * Returns: `[Prefix abc]` - --- **Example 4:** @@ -98,7 +93,6 @@ _extracts_ them. 
* Returns: `[]` * **Throws error:** `ValidationException` - --- **Example 5:** @@ -111,7 +105,6 @@ _extracts_ them. * Returns: `[]` * **Throws error:** `ValidationException` - --- **Example 6:** @@ -124,23 +117,16 @@ _extracts_ them. * Returns: `[]` * **Throws error:** `ValidationException` - - - ## Parameter ### Regex regular expression -- ID: `regex` -- Datatype: `string` -- Default Value: `\w*` - - - - +* ID: `regex` +* Datatype: `string` +* Default Value: `\w*` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/cmem-plugin-ulid.md b/docs/build/reference/transformer/Value/cmem-plugin-ulid.md index e6c12bc08..15c4be6fe 100644 --- a/docs/build/reference/transformer/Value/cmem-plugin-ulid.md +++ b/docs/build/reference/transformer/Value/cmem-plugin-ulid.md @@ -15,7 +15,6 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - ULID is a proposed identifier scheme, which produces time-based, random and sortable strings. The following features are highlighted [in the specification](https://github.com/ulid/spec): @@ -32,7 +31,6 @@ and sortable strings. The following features are highlighted This transform plugin allows for creation of ULID based identifiers (plain or URN). It does not support any input entities. - ## Parameter ### Number of Values @@ -43,8 +41,6 @@ Number of values to generate per entity. - Datatype: `Long` - Default Value: `1` - - ### Generate URNs Generate 'urn:x-ulid:*' strings. @@ -53,10 +49,6 @@ Generate 'urn:x-ulid:*' strings. 
- Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID1.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID1.md index db48a0ae6..a41671162 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID1.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID1.md @@ -15,12 +15,9 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - UUIDv1 is generated from a host ID, sequence number, and the current time. - - ## Parameter ### Node (default: hardware address) @@ -31,8 +28,6 @@ Node value in the form "01:23:45:67:89:AB", 01-23-45-67-89-AB", or "0123456789AB - Datatype: `string` - Default Value: `None` - - ### Clock sequence (default: random) If clock sequence is given, it is used as the sequence number. Otherwise a random 14-bit sequence number is chosen. @@ -41,10 +36,6 @@ If clock sequence is given, it is used as the sequence number. Otherwise a rando - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID1ToUUID6.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID1ToUUID6.md index 54b91d670..4e2055f1e 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID1ToUUID6.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID1ToUUID6.md @@ -15,17 +15,15 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - UUIDv6 is a field-compatible version of UUIDv1, reordered for improved DB locality. It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs. Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead. 
- ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID3.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID3.md index 4de98375e..483731046 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID3.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID3.md @@ -28,8 +28,6 @@ The namespace. - Datatype: `string` - Default Value: `None` - - ### Namespace as UUID Applies only if none of the pre-defined namespaces is selected. If enabled, the namespace string needs to be a valid UUID. Otherwise, the namespace UUID is a UUIDv1 derived from the MD5 hash of the namespace string. @@ -38,10 +36,6 @@ Applies only if none of the pre-defined namespaces is selected. If enabled, the - Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID4.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID4.md index e612c2725..3694e7768 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID4.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID4.md @@ -23,4 +23,4 @@ UUIDv4 specifies a random UUID. ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID5.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID5.md index 8a3e96c0d..3a93b8e0c 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID5.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID5.md @@ -28,8 +28,6 @@ If 'namespace' is not given, the input string is used. 
- Datatype: `string` - Default Value: `None` - - ### Namespace as UUID Applies only if none of the pre-defined namespaces is selected. If enabled, the namespace string needs to be a valid UUID. Otherwise, the namespace UUID is a UUIDv1 derived from the SHA1 hash of the namespace string. @@ -38,10 +36,6 @@ Applies only if none of the pre-defined namespaces is selected. If enabled, the - Datatype: `boolean` - Default Value: `false` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID6.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID6.md index 8f500ee50..4297d57d2 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID6.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID6.md @@ -15,7 +15,6 @@ tags: In order to use it, you need to install it, e.g. with cmemc. - UUIDv6 is generated from a host ID, sequence number, and the current time. @@ -24,7 +23,6 @@ improved DB locality. It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs. Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead. - ## Parameter ### Node (default: hardware address) @@ -35,8 +33,6 @@ Node value in the form "01:23:45:67:89:AB", 01-23-45-67-89-AB", or "0123456789AB - Datatype: `string` - Default Value: `None` - - ### Clock sequence (default: random) If clock sequence is given, it is used as the sequence number. Otherwise a random 14-bit number is chosen. @@ -45,10 +41,6 @@ If clock sequence is given, it is used as the sequence number. 
Otherwise a rando - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID7.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID7.md index 0ce20ef3d..ff98bd6d5 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID7.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID7.md @@ -23,11 +23,10 @@ excluded. As well as improved entropy characteristics over versions Implementations SHOULD utilize UUIDv7 over UUIDv1 and 6 if possible. - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID8.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID8.md index d97f8ced7..7d139eb86 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID8.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID8.md @@ -20,11 +20,10 @@ widely implemented and well known Unix Epoch timestamp source, the number of nanoseconds since midnight 1 Jan 1970 UTC, leap seconds excluded. 
- ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUIDConvert.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUIDConvert.md index 1d996c8e6..b52b57e80 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUIDConvert.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUIDConvert.md @@ -32,8 +32,6 @@ Input string format - Datatype: `string` - Default Value: `uuid_hex` - - ### To Output string format @@ -42,10 +40,6 @@ Output string format - Datatype: `string` - Default Value: `hex` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUIDVersion.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUIDVersion.md index 4800c0d8e..39d23e6ab 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUIDVersion.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUIDVersion.md @@ -23,4 +23,4 @@ Input: UUID string, output: UUID version number of input. ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/constant.md b/docs/build/reference/transformer/Value/constant.md index b0eb141a3..1ca43caa4 100644 --- a/docs/build/reference/transformer/Value/constant.md +++ b/docs/build/reference/transformer/Value/constant.md @@ -8,8 +8,6 @@ tags: # Constant - - Generates a constant value. ## Examples @@ -24,23 +22,16 @@ Generates a constant value. 
* Returns: `[John]` - - - ## Parameter ### Value The constant value to be generated -- ID: `value` -- Datatype: `string` -- Default Value: `None` - - - - +* ID: `value` +* Datatype: `string` +* Default Value: `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/constantUri.md b/docs/build/reference/transformer/Value/constantUri.md index f64f74efa..03ecab8e3 100644 --- a/docs/build/reference/transformer/Value/constantUri.md +++ b/docs/build/reference/transformer/Value/constantUri.md @@ -8,11 +8,8 @@ tags: # Constant URI - - Generates a constant URI. - ## Parameter ### Value @@ -23,10 +20,6 @@ The constant URI to be generated - Datatype: `uri` - Default Value: `owl:Class` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/datasetParameter.md b/docs/build/reference/transformer/Value/datasetParameter.md index 6f0ffc84c..893e40baa 100644 --- a/docs/build/reference/transformer/Value/datasetParameter.md +++ b/docs/build/reference/transformer/Value/datasetParameter.md @@ -8,11 +8,8 @@ tags: # Dataset parameter - - Reads a meta data parameter from a dataset in Corporate Memory. If authentication is enabled, workbench.superuser must be configured. - ## Parameter ### Project @@ -23,8 +20,6 @@ The project of the dataset. - Datatype: `project` - Default Value: `cmem` - - ### Dataset The dataset the meta data parameter is read from. @@ -33,8 +28,6 @@ The dataset the meta data parameter is read from. 
- Datatype: `task` - Default Value: `None` - - ### Key No description @@ -43,8 +36,6 @@ No description - Datatype: `string` - Default Value: `None` - - ### Lang No description @@ -53,10 +44,6 @@ No description - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/defaultValue.md b/docs/build/reference/transformer/Value/defaultValue.md index 7fce2bb23..6fa7df705 100644 --- a/docs/build/reference/transformer/Value/defaultValue.md +++ b/docs/build/reference/transformer/Value/defaultValue.md @@ -8,8 +8,6 @@ tags: # Default Value - - Generates a default value, if the input values are empty. Forwards any non-empty values. ## Examples @@ -24,7 +22,6 @@ Generates a default value, if the input values are empty. Forwards any non-empty * Returns: `[input value]` - --- **Outputs the default value, if the inputs are empty:** @@ -36,23 +33,16 @@ Generates a default value, if the input values are empty. Forwards any non-empty * Returns: `[default value]` - - - ## Parameter ### Value The default value to be generated, if input values are empty -- ID: `value` -- Datatype: `string` -- Default Value: `default` - - - - +* ID: `value` +* Datatype: `string` +* Default Value: `default` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/emptyValue.md b/docs/build/reference/transformer/Value/emptyValue.md index c301f2316..649854e3d 100644 --- a/docs/build/reference/transformer/Value/emptyValue.md +++ b/docs/build/reference/transformer/Value/emptyValue.md @@ -8,15 +8,12 @@ tags: # Empty value - - Generates an empty value. 
- ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/inputHash.md b/docs/build/reference/transformer/Value/inputHash.md index 04ce371f6..b78c7df08 100644 --- a/docs/build/reference/transformer/Value/inputHash.md +++ b/docs/build/reference/transformer/Value/inputHash.md @@ -8,8 +8,6 @@ tags: # Input hash - - Calculates the hash sum of the input values. Generates a single hash sum for all input values combined. This operator supports using different hash algorithms from the [Secure Hash Algorithms family](https://en.wikipedia.org/wiki/Secure_Hash_Algorithms) (SHA, e.g. SHA256) and two algorithms from the [Message-Digest Algorithm family](https://en.wikipedia.org/wiki/MD5) (MD2 / MD5). Please be aware that some of these algorithms are not secure due the possibility of collision attacks and other attacks. @@ -25,23 +23,16 @@ This operator supports using different hash algorithms from the [Secure Hash Alg * Returns: `[f708c2afff0ed197e8551c4dd549ee5b848e0b407106cbdb8e451c8cd1479362]` - - - ## Parameter ### Algorithm The hash algorithm to be used. -- ID: `algorithm` -- Datatype: `string` -- Default Value: `SHA256` - - - - +* ID: `algorithm` +* Datatype: `string` +* Default Value: `SHA256` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/randomNumber.md b/docs/build/reference/transformer/Value/randomNumber.md index 01c4f222c..1ad34e622 100644 --- a/docs/build/reference/transformer/Value/randomNumber.md +++ b/docs/build/reference/transformer/Value/randomNumber.md @@ -8,11 +8,8 @@ tags: # Random number - - Generates a set of random numbers. - ## Parameter ### Min @@ -23,8 +20,6 @@ The smallest number that could be generated. - Datatype: `double` - Default Value: `0.0` - - ### Max The largest number that could be generated. @@ -33,8 +28,6 @@ The largest number that could be generated. 
- Datatype: `double` - Default Value: `100.0` - - ### Min count The minimum number of values to generate in each set. @@ -43,8 +36,6 @@ The minimum number of values to generate in each set. - Datatype: `int` - Default Value: `1` - - ### Max count The maximum number of values to generate in each set. @@ -53,10 +44,6 @@ The maximum number of values to generate in each set. - Datatype: `int` - Default Value: `1` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/readParameter.md b/docs/build/reference/transformer/Value/readParameter.md index fadc0d8ef..088e0feed 100644 --- a/docs/build/reference/transformer/Value/readParameter.md +++ b/docs/build/reference/transformer/Value/readParameter.md @@ -8,11 +8,8 @@ tags: # Read parameter - - Reads a parameter from a Java Properties file. - ## Parameter ### Resource @@ -23,8 +20,6 @@ The Java properties file to read the parameter from. - Datatype: `resource` - Default Value: `None` - - ### Parameter The name of the parameter. @@ -33,10 +28,6 @@ The name of the parameter. - Datatype: `string` - Default Value: `None` - - - - ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/Value/uuid.md b/docs/build/reference/transformer/Value/uuid.md index 0738b722c..950657928 100644 --- a/docs/build/reference/transformer/Value/uuid.md +++ b/docs/build/reference/transformer/Value/uuid.md @@ -8,14 +8,11 @@ tags: # UUID - - Generates UUIDs. If no input value is provided, a random UUID (type 4) is generated using a cryptographically strong pseudo random number generator. If input values are provided, a name-based UUID (type 3) is generated for each input value. Each input value will generate a separate UUID. For building a UUID from multiple inputs, the Concatenate operator can be used. - ## Examples **Notation:** List of values are represented via square brackets. 
Example: `[first, second]` represents a list of two values "first" and "second". @@ -28,7 +25,6 @@ Each input value will generate a separate UUID. For building a UUID from multipl * Returns: `[cee963a2-8f70-3e97-b51a-85ef732e66dd]` - --- **Example 2:** @@ -37,13 +33,10 @@ Each input value will generate a separate UUID. For building a UUID from multipl * Returns: `[690802dd-a317-335f-807c-e4e1e32b7b5b, 925cbd7f-377b-3fbd-8f4c-ca41529b74ad]` - - - ## Parameter `None` ## Advanced Parameter -`None` \ No newline at end of file +`None` diff --git a/docs/build/reference/transformer/index.md b/docs/build/reference/transformer/index.md index 208b1785d..1a5267501 100644 --- a/docs/build/reference/transformer/index.md +++ b/docs/build/reference/transformer/index.md @@ -83,7 +83,7 @@ Transform operators transform a one or more sequences of string values to a sequ | [Fix URI](Normalize/uriFix.md) | Normalize | Generates valid absolute URIs from the given values. Already valid absolute URIs are left untouched. | | [Floor](Excel/Excel_FLOOR.md) | Excel | Excel FLOOR(number; significance; mode): Rounds the given number down to the nearest multiple of significance. Significance is the value to whose multiple of ten the number is to be rounded down (.01, .1, 1, 10, etc.). Mode is an optional value. If it is indicated and non-zero and if the number and significance are negative, rounding up is carried out based on that value. | | [Forecast](Excel/Excel_FORECAST.md) | Excel | Excel FORECAST(value; data_Y; data_X): Extrapolates future values based on existing x and y values. Value is the x value, for which the y value of the linear regression is to be returned. Data_Y is the array or range of known y's. Data_X is the array or range of known x's. Does not work for exponential functions. | - | [Format number](Numeric/formatNumber.md) | Numeric | Formats a number according to a user-defined pattern. 
The pattern syntax is documented at: https://docs.oracle.com/javase/8/docs/api/java/text/DecimalFormat.html | + | [Format number](Numeric/formatNumber.md) | Numeric | Formats a number according to a user-defined pattern. The pattern syntax is documented at: | | [Fv](Excel/Excel_FV.md) | Excel | Excel FV(rate; NPER; PMT; PV; type): Returns the future value of an investment based on periodic, constant payments and a constant interest rate. Rate is the periodic interest rate. NPER is the total number of periods. PMT is the annuity paid regularly per period. PV (optional) is the present cash value of an investment. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period. | | [Geomean](Excel/Excel_GEOMEAN.md) | Excel | Excel GEOMEAN(number_1; number_2; ... number_30): Returns the geometric mean of a sample. Number_1; number_2; ... number_30 are numerical arguments or ranges that represent a random sample. | | [Get value by index](Sequence/getValueByIndex.md) | Sequence | Returns the value found at the specified index. Fails or returns an empty result depending on failIfNoFound is set or not. Please be aware that this will work only if the data source supports some kind of ordering like XML or JSON. This is probably not a good idea to do with RDF models. If emptyStringToEmptyResult is true then instead of a result with an empty String, an empty result is returned. | diff --git a/docs/build/snowflake-tutorial/index.md b/docs/build/snowflake-tutorial/index.md index de73a1cec..ce8887378 100644 --- a/docs/build/snowflake-tutorial/index.md +++ b/docs/build/snowflake-tutorial/index.md @@ -22,17 +22,17 @@ This integration enables organizations to make informed decisions, improve their This tutorial contains the following step-by-step instructions to connect the Snowflake data-warehouse with eccenca corporate memory: -- [1. Configure Custom JDBC Driver](#1-configure-custom-jdbc-driver) -- [2. 
Create a database in Snowflake](#2-create-a-database-in-snowflake) -- [3. Create a project in eccenca Corporate Memory](#3-create-a-project-in-eccenca-corporate-memory) -- [4. Create a transformation to build mapping rules](#4-create-a-transformation-to-build-mapping-rules) -- [5. Create a knowledge graph](#5-create-a-knowledge-graph) +- [1. Configure Custom JDBC Driver](#1-configure-custom-jdbc-driver) +- [2. Create a database in Snowflake](#2-create-a-database-in-snowflake) +- [3. Create a project in eccenca Corporate Memory](#3-create-a-project-in-eccenca-corporate-memory) +- [4. Create a transformation to build mapping rules](#4-create-a-transformation-to-build-mapping-rules) +- [5. Create a knowledge graph](#5-create-a-knowledge-graph) ## Sample material The following material is used in this tutorial, you should download the files and have them at hand throughout the tutorial: -- The product data vocabulary [products_vocabulary.nt](products_vocabulary.nt) +- The product data vocabulary [products_vocabulary.nt](products_vocabulary.nt) ## 1. Configure Custom JDBC Driver @@ -42,8 +42,8 @@ The SQL-Dataset of eccenca Corporate Memory can access any database that offers This happens partly via Apache Spark SQL but requires no Spark specific configuration for eccenca Corporate Memory. For Snowflake supported (e.g. 
3.13.34) JDBC drivers can be found at: -- [MVN Repository](https://repo1.maven.org/maven2/net/snowflake/snowflake-jdbc/) ([direct jar download](https://repo1.maven.org/maven2/net/snowflake/snowflake-jdbc/3.13.34/snowflake-jdbc-3.13.34.jar)) -- to verify and build yourself: [github.com/snowflakedb/snowflake-jdbc](https://github.com/snowflakedb/snowflake-jdbc) +- [MVN Repository](https://repo1.maven.org/maven2/net/snowflake/snowflake-jdbc/) ([direct jar download](https://repo1.maven.org/maven2/net/snowflake/snowflake-jdbc/3.13.34/snowflake-jdbc-3.13.34.jar)) +- to verify and build yourself: [github.com/snowflakedb/snowflake-jdbc](https://github.com/snowflakedb/snowflake-jdbc) To use the driver it needs to be part of the classpath of eccenca Build (DataIntegration). That can be achieved in multiple ways but it is recommended to register the driver via the `dataintegration.conf` configuration file. @@ -72,35 +72,35 @@ There are 3 settings to specify: ## 2. Create a database in Snowflake -- Login to Snowflake enter the **username** and **password**, then click on **Sign in**. +- Login to Snowflake enter the **username** and **password**, then click on **Sign in**. ![image](snowflake-login.png){ width="50%" class="bordered" } -- Click on **Database** on the left side of the page. +- Click on **Database** on the left side of the page. ![image](snowflake-click_on_database.png){ class="bordered" } -- Click on **+Database** on the right side of the page. +- Click on **+Database** on the right side of the page. ![image](snowflake-add-database.png){ class="bordered" } -- Type the database name **Product**, then click on **Create**. +- Type the database name **Product**, then click on **Create**. ![image](snowflake-product-name-database.png){ width="50%" class="bordered" } -- Click on database **product**, then click on **+Schema** on the right side of the page. +- Click on database **product**, then click on **+Schema** on the right side of the page. 
![image](snowflake-dd-schema.png){ class="bordered" } -- Type the schema name **products_vocabulary** and click on **Create**. +- Type the schema name **products_vocabulary** and click on **Create**. ![image](snowflake-schema-name.png){ width="50%" class="bordered" } -- Click on scheme **products_vocabulary** on the left side of the page then click on **Create** on the right side of the page, then click on **Table**, then select then **Standard**. +- Click on scheme **products_vocabulary** on the left side of the page then click on **Create** on the right side of the page, then click on **Table**, then select then **Standard**. ![image](snowflake-click-on-standard.png){ class="bordered" } -- Click on schema name **products_vocabulary** on the left side of the page and type the **sql query** for creating a table in the center, then click on **Run** on the right side of the page. +- Click on schema name **products_vocabulary** on the left side of the page and type the **sql query** for creating a table in the center, then click on **Run** on the right side of the page. You can create the _PRODUCT_ table with the following SQL query: @@ -117,7 +117,7 @@ CREATE TABLE product( ) ; ``` -- Type or copy the **SQL** query for creating a database in the table that is created, then click on **Run**. +- Type or copy the **SQL** query for creating a database in the table that is created, then click on **Run**. ![image](snowflake-sql-for-table.png){ class="bordered" } @@ -1140,31 +1140,31 @@ Here you can populate some test data with the following SQL query: ## 3. Create a project in eccenca Corporate Memory -- Click on **Create** on the right side of the page. +- Click on **Create** on the right side of the page. ![image](snowflake-create-project.png){ class="bordered" } -- Click on Project, then click on **Add**. +- Click on Project, then click on **Add**. 
![image](snowflake-add-project.png){ class="bordered" } -- Type the project name **product** in the title field, then click on **Create**. +- Type the project name **product** in the title field, then click on **Create**. ![image](snowflake-project-name.png){ class="bordered" } -- Click on **Create** on the right side of the page. +- Click on **Create** on the right side of the page. ![image](snowflake-create.png){ class="bordered" } -- Click on **JDBC endpoint**, then click on **Add**. +- Click on **JDBC endpoint**, then click on **Add**. ![image](snowflake-jdbc.png){ class="bordered" } -- Type the name **product table (JDBC)** in the label field. +- Type the name **product table (JDBC)** in the label field. ![image](snowflake-product-name.png){ class="bordered" } -- Type the **JDBC URL** path in the **JDBC Driver connection URL** field. +- Type the **JDBC URL** path in the **JDBC Driver connection URL** field. !!! Note @@ -1178,21 +1178,21 @@ Here you can populate some test data with the following SQL query: Here is a breakdown of the elements of this example connection string. -- `jdbc:snowflake://` is the prefix for the snowflake JDBC driver. +- `jdbc:snowflake://` is the prefix for the snowflake JDBC driver. -- `kiaouyb-fe21477.snowflakecomputing.com` is the URL for the snowflake account you want to connect to. +- `kiaouyb-fe21477.snowflakecomputing.com` is the URL for the snowflake account you want to connect to. The number `WTXSZXM-FS77078` is the organization number you will get from Snowflake as shown below. ![image](snowflake-organization-number.png){ class="bordered" } -- `?db=product` specifies the name of the Snowflake database you want to connect to.In this case, the database is named product. +- `?db=product` specifies the name of the Snowflake database you want to connect to.In this case, the database is named product. -- `&schema=products_vocabulary` specifies the name of the Snowflake schema that you want to use within the specified database. 
+- `&schema=products_vocabulary` specifies the name of the Snowflake schema that you want to use within the specified database. In this case, the schema name is _products_vocabulary_. ![image](snowflake-jdbc-uri-name.png){ class="bordered" } -- Type Source query as +- Type Source query as ```sparql SELECT * from product @@ -1200,19 +1200,19 @@ Here is a breakdown of the elements of this example connection string. ![image](snowflake-query-source.png){ class="bordered" } -- Select the **Query strategy** as **Execute the given source query.No paging or virtual query**. +- Select the **Query strategy** as **Execute the given source query.No paging or virtual query**. ![image](snowflake-query-strategy.png){ class="bordered" } -- Select the **Write strategy** as **An exception will be thrown, if the table already exists.** +- Select the **Write strategy** as **An exception will be thrown, if the table already exists.** ![image](snowflake-write-stategy.png){ class="bordered" } -- Click on the **ADVANCED OPTIONS**. +- Click on the **ADVANCED OPTIONS**. ![image](snowflake-jdbc-dataset-advanced-options.png){ class="bordered" } -- Type **Username** and **Password** in the dialog window, then click on **Create**. +- Type **Username** and **Password** in the dialog window, then click on **Create**. ![image](snowflake-userpassword.png){ class="bordered" } @@ -1224,52 +1224,52 @@ Here is a breakdown of the elements of this example connection string. ## 4. Create a transformation to build mapping rules -- Click on **Create** on the right side of the page. +- Click on **Create** on the right side of the page. ![image](snowflake-click-on-create.png){ class="bordered" } -- Click on **Transform** on the left side of the page, then on **Transform** in the centre of the page, then click on **Add**. +- Click on **Transform** on the left side of the page, then on **Transform** in the centre of the page, then click on **Add**. 
![image](snowflake-transformation.png){ class="bordered" } -- Type the name **product** in the **Label** field, in the **INPUT TASK Dataset** select **Product Table (JDBC)** and in the **Type** field select **table**. +- Type the name **product** in the **Label** field, in the **INPUT TASK Dataset** select **Product Table (JDBC)** and in the **Type** field select **table**. ![image](snowflake-trans-connect.png){ class="bordered" } ![image](snowflake-type-table.png){ class="bordered" } -- In the **Output** dataset field select **product graph**, then click on **Create** . +- In the **Output** dataset field select **product graph**, then click on **Create** . ![image](snowflake-output-gp.png){ class="bordered" } -- Click on **Mapping**, then click on **Edit**. +- Click on **Mapping**, then click on **Edit**. ![image](snowflake-click-on-mapping.png){ class="bordered" } -- For the target entity select **Product (pv:product)**. +- For the target entity select **Product (pv:product)**. ![image](snowflake-target-entity.png){ class="bordered" } -- Click on **create custom pattern**. +- Click on **create custom pattern**. ![image](snowflake-custom-pattern.png){ class="bordered" } -- Type the URI pattern as ``. +- Type the URI pattern as ``. You can use either company.org or company.com as per your requirement. Then type the label name as **product** and then click on **save**. ![image](snowflake-uri-pattern.png){ class="bordered" } -- Click on **+Icon**, then select the **Add value mapping**. +- Click on **+Icon**, then select the **Add value mapping**. ![image](snowflake-add-value.png){ class="bordered" } -- Select the **target property** according to transformation requirements, for example name, id, etc., then select the **value path** according to the target property as the product name, product id etc. 
+- Select the **target property** according to transformation requirements, for example name, id, etc., then select the **value path** according to the target property as the product name, product id etc. This step will help in mapping the data from the source to the target property. ![image](snowflake-target-property.png){ class="bordered" } -- Type the label name **product name**, then click on **Save**. +- Type the label name **product name**, then click on **Save**. ![image](snowflake-trans-label.png){ class="bordered" } @@ -1295,25 +1295,25 @@ Here is a breakdown of the elements of this example connection string. Suggestions generated are based on vocabulary which describes the data in the CSV files: [products_vocabulary.nt](products_vocabulary.nt) -- **Tick** the box to select the suggestions to be added, then click on **Add**. +- **Tick** the box to select the suggestions to be added, then click on **Add**. ![image](snowflake-tick.png){ class="bordered" } ## 5. Create a knowledge graph -- Click on **Create** on the right side of the page. +- Click on **Create** on the right side of the page. ![image](snowflake-trans-result.png){ class="bordered" } -- Select **Knowledge Graph**, then click on **Add**. +- Select **Knowledge Graph**, then click on **Add**. ![image](snowflake-kg-graph.png){ class="bordered" } -- Select the **target project** from the drop down menu as **product**. +- Select the **target project** from the drop down menu as **product**. ![image](snowflake-graph-target.png){ class="bordered" } -- Type **product graph** in the label field, then enter the **graph URI** in the Graph field, then click on **Create**. +- Type **product graph** in the label field, then enter the **graph URI** in the Graph field, then click on **Create**. ![image](snowflake-graph-uri.png){ class="bordered" } @@ -1322,4 +1322,3 @@ Here is a breakdown of the elements of this example connection string. Graph is created successfully. 
![image](snowflake-easynav-graph.png){ class="bordered" } - diff --git a/docs/build/tutorial-how-to-link-ids-to-osint/define-the-interfaces/index.md b/docs/build/tutorial-how-to-link-ids-to-osint/define-the-interfaces/index.md index 2a3029c10..a0f6415c6 100644 --- a/docs/build/tutorial-how-to-link-ids-to-osint/define-the-interfaces/index.md +++ b/docs/build/tutorial-how-to-link-ids-to-osint/define-the-interfaces/index.md @@ -9,31 +9,31 @@ In this tutorial, we will show how to (1) define the information available in an In the previous tutorial, we have defined the use cases in contact with the humans, ie. analysts (see figure 1) -

- -

- -*Figure 1. We need to imagine an interface where analysts can list the IoCs during the incident and read all their documentations.* +
+![Figure 1. We need to imagine an interface where analysts can list the IoCs during the incident and read all their documentations.](use_cases_with_interfaces.png) +
Figure 1. We need to imagine an interface where analysts can list the IoCs during the incident and read all their documentations.
+
The classic Splunk interface is a set of panels, like "static table" panel. This table panel can show a table of cells and also one cell with a text via in input a Splunk Search Processing Language (SPL). With the plugin [Linked Data App](../link-IDS-event-to-KG/eccenca_commands.tar.gz) ([tutorial page](../link-IDS-event-to-KG/index.md)), we can insert a SPARQL query and select the part of your knowledge graph to print (figure 2). -![](demo-splunk-with-kg-edit.png) - -*Figure 2. An analyst can insert a SPARQL query with Splunk token in input of one "static table" panel of his dashboard with the plugin "Linked Data App"* +
+![Figure 2. An analyst can insert a SPARQL query with Splunk token in input of one "static table" panel of his dashboard with the plugin "Linked Data App"](demo-splunk-with-kg-edit.png) +
Figure 2. An analyst can insert a SPARQL query with Splunk token in input of one "static table" panel of his dashboard with the plugin "Linked Data App"
+
The first dashboard to do for our use cases is the list of IoCs with classic SPL queries of analysts via a static table and allow the analyst to select one IoC via a click in the table. The dashboard with this selected row can save the ID of IoC in a global variable for the other panels in the same dashboard ([a Splunk token](https://docs.splunk.com/Documentation/Splunk/9.0.5/Viz/tokens)). When this variable (Splunk token) is changed by the user, Splunk is able to recalculate automatically the queries with this variable in the other static tables. So with this mechanism, we can print the details in the knowledge graph (with SPARQL queries) and the IoC statistics in the Splunk indexes (with SPL queries) around of one selected IoC. With these knowledge about Splunk dashboard, we proposed to analysts a first naive interface in the figure 3. -

- -

- -*Figure 3. Imagine the expected Splunk dashboard with its interaction* +
+![Figure 3. Imagine the expected Splunk dashboard with its interaction](interface.png) +
Figure 3. Imagine the expected Splunk dashboard with its interaction
+
Here, the figure 3 is nice but before this first schema during the project, there are a lot of shemas and all were minimalist and ugly often only on a whiteboard. This type schema before the technical feasibility is only to validate the objective with the analysts before starting the development. During the technical feasibility, we can decrease/increase step-by-step your objectives to show finally a first result in figure 4 in a real dashboard. -![](../link-IDS-event-to-KG/demo_ld_without_html.png) - -*Figure 4. First interface with only SPARQL queries in SPLUNK static tables.* +
+![Figure 4. First interface with only SPARQL queries in SPLUNK static tables.](./../../link-IDS-event-to-KG/demo_ld_without_html.png) +
Figure 4. First interface with only SPARQL queries in SPLUNK static tables.
+
## Technical feasibility with the available information @@ -42,9 +42,10 @@ In this tutorial, we learn to use Corporate Memory of Eccenca to transform these After research and one meeting with analysts, we have chosen the datasets of Mitre Attack, the datasets of IoC rules (Sigma and Hayabusa) in Github and of course, the IoCs data already in the Splunk indexes. -![](alert_data_of_ioc.png) - -*Figure 5. Define the information available in alerts of IDS and in OSINT to link these information.* +
+![Figure 5. Define the information available in alerts of IDS and in OSINT to link these information.](alert_data_of_ioc.png) +
Figure 5. Define the information available in alerts of IDS and in OSINT to link these information.
+
The Splunk indexes of IoCs are selected by the analysts in the dashboard via the component [multiselect input](https://docs.splunk.com/Documentation/Splunk/9.0.5/Viz/FormEditor#Multiselect) in the form part of dashboard (the form part inits other Splunk tokens). We have choosen the IDs to link these data and the figure 5 resumes how we are going to link these data via Corporate Memory of Eccenca. @@ -56,9 +57,9 @@ A user can clone any dashboard before modifying it. For example, in our dashboard, you can find: -- the root element `form`, -- the definition of input component to select the Splunk indexes by the user and -- the table panel to execute a SPL query and show the result in a table +- the root element `form`, +- the definition of input component to select the Splunk indexes by the user and +- the table panel to execute a SPL query and show the result in a table ```xml
@@ -158,24 +159,29 @@ WHERE { During our project, we have implemented the SPARQL command necessary to execute a SPARQL query in a SPL query but also several scripts to extend the panels of dashboard. For example, these are problems to print a HTML text and open a external Web page in a dashboard. Before starting a knowledge graph, we need to know if we have to work with a specific syntax in output for Splunk. So, we have searched the simplest solution to print the HTML literal in our knowledge graph with their links. We found it and implemented simple Javascript scripts to resolve these problems. These scripts are imported via the header of dashboard XML and called in the XML of static table panel. You can see the final dashboard with the Mitre description in HTML (the Mitre in these datasets uses Markdown but we show how convert Markdown link to HTML). We give you these scripts in your [Linked Data App](../link-IDS-event-to-KG/eccenca_commands.tar.gz) ([tutorial page](../link-IDS-event-to-KG/index.md)). -![](../link-IDS-event-to-KG/demo_ld.png) - -*Figure 6. With an extern Javascript script, static tables support HTML and the user can open.* +
+![Figure 6. With an extern Javascript script, static tables support HTML and the user can open.](../link-IDS-event-to-KG/demo_ld.png) +
Figure 6. With an extern Javascript script, static tables support HTML and the user can open.
+
## Conclusion: starting to specify the necessary RDF models for these interfaces With the interfaces, the available data and their links in head, the analyst can now imagine the necessary RDF models of concepts (for example, figure 7 and 8) in his future knowledge graph to generate expected dashboards. These RDF models evolve at the same time as the interfaces (forever) and according to future RDF standards of Cyber world. With Corporate Memory, after each evolution of your models, you can rebuild your knowledge graph "from scratch" when you want. Several RDF models of different versions can exist in your knowledge graphs, so you can update progressively your dashboards without service interruption of old dashboards. -![](../lift-data-from-STIX-2.1-data-of-mitre-attack/rdf-model-course-of-action.png) -*Figure 7. RDF model of Mitre concept "course of action" in our future knowledge graph.* +
+![Figure 7. RDF model of Mitre concept "course of action" in our future knowledge graph.](./../lift-data-from-STIX-2.1-data-of-mitre-attack/rdf-model-course-of-action.png) +
Figure 7. RDF model of Mitre concept "course of action" in our future knowledge graph.
+
-![](../lift-data-from-YAML-data-of-hayabusa-sigma/23-1-rdf-model-rule.png) -*Figure 8. RDF model of concept "IoC Rule" in our future knowledge graph.* +
+![Figure 8. RDF model of concept "IoC Rule" in our future knowledge graph.](./../lift-data-from-YAML-data-of-hayabusa-sigma/23-1-rdf-model-rule.png) +
Figure 8. RDF model of concept "IoC Rule" in our future knowledge graph.
+
--- Tutorial: [how to link Intrusion Detection Systems (IDS) to Open-Source INTelligence (OSINT)](../index.md) -Next chapter: [Build a Knowledge Graph from MITRE ATT&CK® datasets](../lift-data-from-STIX-2.1-data-of-mitre-attack/index.md) +Next chapter: [Build a Knowledge Graph from MITRE ATT&CK® datasets](./../../lift-data-from-STIX-2.1-data-of-mitre-attack/index.md) Previous chapter: [Define the need, the expected result and the use cases](../define-the-need/index.md) diff --git a/docs/build/tutorial-how-to-link-ids-to-osint/define-the-need/index.md b/docs/build/tutorial-how-to-link-ids-to-osint/define-the-need/index.md index 035fc148b..903e7e13e 100644 --- a/docs/build/tutorial-how-to-link-ids-to-osint/define-the-need/index.md +++ b/docs/build/tutorial-how-to-link-ids-to-osint/define-the-need/index.md @@ -75,9 +75,10 @@ In this tutorial, we study only this first result: To achieve such savings we aggregate all links of sources and references about alerts in the Security information and event management (SIEM) in a knowledge graph. Analyst are able to read the Mitre information directly in his timeline (e.g. in SPLUNK) and to access all references about an alert from this central place. - ![](slide_result_expected.png) - - *Figure 1. Example of expected results for analysts during the task to understand the meaning and relevance of new alerts in their IDS.* +
+ ![Figure 1. Example of expected results for analysts during the task to understand the meaning and relevance of new alerts in their IDS.](slide_result_expected.png) +
Figure 1. Example of expected results for analysts during the task to understand the meaning and relevance of new alerts in their IDS.
+
When we know the waited results, we can imagine the necessary use cases. @@ -85,9 +86,10 @@ When we know the waited results, we can imagine the necessary use cases. We know the final need, the expected results and the limits of knowledge graph with the Linked Data technologies in Splunk. So, we can define the probable use cases to implement and all the actors who will interact with these use cases. -![](basic_use_cases.png) - -*Figure 2. UML use cases to resolve this basic need and several use cases with Wikidata to show the interoperability of knowledge graphs with the Linked data technologies. Each bubble in this type of schema is a use case.* +
+![Figure 2. UML use cases to resolve this basic need and several use cases with Wikidata to show the interoperability of knowledge graphs with the Linked data technologies. Each bubble in this type of schema is a use case.](basic_use_cases.png) +
Figure 2. UML use cases to resolve this basic need and several use cases with Wikidata to show the interoperability of knowledge graphs with the Linked data technologies. Each bubble in this type of schema is a use case.
+
With a simple UML schema of use cases, you can delimited each use case, their priorities and their tasks for the next step, ie. specify the essential interfaces to limit the complexity of future RDF graph. @@ -110,13 +112,13 @@ Another result of this project was to resolve this other need: For the moment, we are searching the best way to resolve this need but a demonstrator to manage several investigations in the same knowledge graphs is available with several examples of dasboards in the Splunk app "[Investigate lateral movements with a knowledge graph](../link-IDS-event-to-KG-via-cmem/eccenca_poc_investigate.tar.gz)" ([tutorial page](../link-IDS-event-to-KG/index.md)). This need is for advanced users of Corporate Memory and it may be proposed in a future tutorial. -![](advanced_use_cases.png) - -*Figure 3. UML use cases to resolve this avanced need.* +
+![Figure 3. UML use cases to resolve this avanced need.](advanced_use_cases.png) +
Figure 3. UML use cases to resolve this avanced need.
+
--- Tutorial: [how to link Intrusion Detection Systems (IDS) to Open-Source INTelligence (OSINT)](../index.md) Next chapter: [Specify the dashboards of use cases before the RDF models](../define-the-interfaces/index.md) - diff --git a/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-STIX-2.1-data-of-mitre-attack/index.md b/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-STIX-2.1-data-of-mitre-attack/index.md index ef1b16477..365a1b297 100644 --- a/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-STIX-2.1-data-of-mitre-attack/index.md +++ b/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-STIX-2.1-data-of-mitre-attack/index.md @@ -6,9 +6,9 @@ MITRE ATT&CK is a globally-accessible knowledge base of adversary tactics and te The MITRE ATT&CK datasets in STIX 2.1 JSON collections are here: -* [enterprise-attack.json](https://github.com/mitre-attack/attack-stix-data/tree/master/enterprise-attack/enterprise-attack.json){target=_blank} -* [mobile-attack.json](https://github.com/mitre-attack/attack-stix-data/blob/master/mobile-attack/mobile-attack.json){target=_blank} -* [ics-attack.json](https://github.com/mitre-attack/attack-stix-data/blob/master/ics-attack/ics-attack.json){target=_blank} +* [enterprise-attack.json](https://github.com/mitre-attack/attack-stix-data/tree/master/enterprise-attack/enterprise-attack.json){target=_blank} +* [mobile-attack.json](https://github.com/mitre-attack/attack-stix-data/blob/master/mobile-attack/mobile-attack.json){target=_blank} +* [ics-attack.json](https://github.com/mitre-attack/attack-stix-data/blob/master/ics-attack/ics-attack.json){target=_blank} [Structured Threat Information Expression (STIX™)]( https://oasis-open.github.io/cti-documentation/stix/intro.html) is a language and serialization format used to exchange cyber threat intelligence (CTI). 
@@ -51,9 +51,9 @@ For each type of dataset, you can create an new project with all the tools neces Create a new project, reproduce the demonstration in the following video: -* Title: MITRE ATT&CK® +* Title: MITRE ATT&CK® -* Description: MITRE ATT&CK® is a globally-accessible knowledge base of adversary tactics and techniques based on real-world observations. +* Description: MITRE ATT&CK® is a globally-accessible knowledge base of adversary tactics and techniques based on real-world observations. ![](23-1-create-project.gif) @@ -63,22 +63,22 @@ MITRE ATT&CK® has 3 domains: [Entreprise](https://attack.mitre.org/techniques/e Each domain dataset is saved in GitHub: -* [enterprise-attack.json](https://github.com/mitre-attack/attack-stix-data/tree/master/enterprise-attack/enterprise-attack.json) -* [mobile-attack.json](https://github.com/mitre-attack/attack-stix-data/blob/master/mobile-attack/mobile-attack.json) -* [ics-attack.json](https://github.com/mitre-attack/attack-stix-data/blob/master/ics-attack/ics-attack.json) +* [enterprise-attack.json](https://github.com/mitre-attack/attack-stix-data/tree/master/enterprise-attack/enterprise-attack.json) +* [mobile-attack.json](https://github.com/mitre-attack/attack-stix-data/blob/master/mobile-attack/mobile-attack.json) +* [ics-attack.json](https://github.com/mitre-attack/attack-stix-data/blob/master/ics-attack/ics-attack.json) 1. Download these 3 files 2. Create for each JSON file, a JSON dataset: -![](23-1-import-JSON.gif) + ![](23-1-import-JSON.gif) -!!! Tip + !!! Tip - Give a short name at each dataset/transformer/etc in Corporate Memory to recognize it easily in the workflow view. For example, we will use "MA Entreprise (JSON)" like label and "MITRE ATT&CK® Entreprise dataset STIX 2.1" like description for the Entreprise dataset and so "MA Mobile (JSON)" for Mobile, "MA ICS (JSON)" for ICS, etc. + Give a short name at each dataset/transformer/etc in Corporate Memory to recognize it easily in the workflow view. 
For example, we will use "MA Entreprise (JSON)" like label and "MITRE ATT&CK® Entreprise dataset STIX 2.1" like description for the Entreprise dataset and so "MA Mobile (JSON)" for Mobile, "MA ICS (JSON)" for ICS, etc. -!!! Success + !!! Success - Now, you can see these JSON datasets in Corporate Memory: - ![](23-1-import-JSON-result.png) + Now, you can see these JSON datasets in Corporate Memory: + ![](23-1-import-JSON-result.png) ### Create named graphs @@ -114,18 +114,18 @@ Create one RDF dataset for each Mitre dataset: 3. Put a URI of named graph 4. Enable "Clear graph before workflow execution" -![](23-1-create-RDF-dataset.gif) + ![](23-1-create-RDF-dataset.gif) -!!! Success + !!! Success - Now, you can see these RDF datasets in Corporate Memory: - ![](23-1-create-RDF-dataset-result.png) + Now, you can see these RDF datasets in Corporate Memory: + ![](23-1-create-RDF-dataset-result.png) -!!! Tip + !!! Tip - The consequence of the option "Clear graph before workflow execution" is the named graph will be deleted (with all its triples) before receiving new triples when you use this named graph like an output in a workflow and also in the transformer task (in the next step). + The consequence of the option "Clear graph before workflow execution" is the named graph will be deleted (with all its triples) before receiving new triples when you use this named graph like an output in a workflow and also in the transformer task (in the next step). - This option is to use only for the graphs which will generate automatically by Corporate Memory. + This option is to use only for the graphs which will generate automatically by Corporate Memory. ### Create a transformer @@ -159,253 +159,253 @@ Create one RDF dataset for each Mitre dataset: 1. Create the prefix of your vocabulary: - ```turtle - prefix ctia: - ``` + ```turtle + prefix ctia: + ``` -![](23-1-create-prefix.gif) + ![](23-1-create-prefix.gif) 2. 
Create the (Mitre) STIX 2.1 transformer -This transformer will be a component of your worflow. You could reuse it in several workflows in other projects. To create a new transformer, you need to give a: + This transformer will be a component of your worflow. You could reuse it in several workflows in other projects. To create a new transformer, you need to give a: -* Label: STIX 2.1 transformer -* Input: MA Entreprise (JSON) -* Output: MA Entreprise (knowledge graph) + * Label: STIX 2.1 transformer + * Input: MA Entreprise (JSON) + * Output: MA Entreprise (knowledge graph) -![](23-1-create-transformer.gif) + ![](23-1-create-transformer.gif) -!!! Tip + !!! Tip - In your use case, there is only this transformer to build this named graph, so there is no consequence on the final knowledge graph when we test this transformer on this graph (automatically cleared after each execution of transformer). However, a good practice is to create a tempory graph in ouput for each transformer, so your final knowledge graph is not affected during the modification of your transformer before executing the workflows with this transformer. In this case, you need to hide this tempory graph of your users. + In your use case, there is only this transformer to build this named graph, so there is no consequence on the final knowledge graph when we test this transformer on this graph (automatically cleared after each execution of transformer). However, a good practice is to create a tempory graph in ouput for each transformer, so your final knowledge graph is not affected during the modification of your transformer before executing the workflows with this transformer. In this case, you need to hide this tempory graph of your users. - You can create a transformer for several syntaxes in input: JSON, XML, CSV, etc. If your format does not exist in Corporate Memory, you can convert your data in JSON before importing this data in Corporate Memory. 
+ You can create a transformer for several syntaxes in input: JSON, XML, CSV, etc. If your format does not exist in Corporate Memory, you can convert your data in JSON before importing this data in Corporate Memory. -!!! Info + !!! Info - STIX gives the possibility to extend its syntaxes. Mitre uses this possibility. So, in theory, if we need to import all the data, we can extend this transformer at all STIX attributes and add the Mitre attributes described in its [documentation](https://github.com/mitre/cti/blob/master/USAGE.md). + STIX gives the possibility to extend its syntaxes. Mitre uses this possibility. So, in theory, if we need to import all the data, we can extend this transformer at all STIX attributes and add the Mitre attributes described in its [documentation](https://github.com/mitre/cti/blob/master/USAGE.md). 3. Study the tree of STIX data -```json -{ - "type": "bundle", - "id": "bundle--19413d5e-67e5-4a48-a4c8-afb06b7954de", - "spec_version": "2.1", - "objects": [ - { - "type": "x-mitre-collection", - "id": "x-mitre-collection--1f5f1533-f617-4ca8-9ab4-6a02367fa019", - "name": "Enterprise ATT&CK", - "description": "ATT&CK for Enterprise provides a knowledge base of real-world adversary behavior targeting traditional enterprise networks. ATT&CK for Enterprise covers the following platforms: Windows, macOS, Linux, PRE, Office 365, Google Workspace, IaaS, Network, and Containers.", - ... - }, - { - "id": "attack-pattern--0042a9f5-f053-4769-b3ef-9ad018dfa298", - "type": "attack-pattern", - "name": "Extra Window Memory Injection", - "description": "Adversaries may inject malicious code..." , - "external_references": [ - { - "source_name": "mitre-attack", - "external_id": "T1055.011", - "url": "https://attack.mitre.org/techniques/T1055/011" - }, - { - "url": "https://msdn.microsoft.com/library/windows/desktop/ms633574.aspx", - "description": "Microsoft. (n.d.). About Window Classes. 
Retrieved December 16, 2017.", - "source_name": "Microsoft Window Classes" - },... -``` + ```json + { + "type": "bundle", + "id": "bundle--19413d5e-67e5-4a48-a4c8-afb06b7954de", + "spec_version": "2.1", + "objects": [ + { + "type": "x-mitre-collection", + "id": "x-mitre-collection--1f5f1533-f617-4ca8-9ab4-6a02367fa019", + "name": "Enterprise ATT&CK", + "description": "ATT&CK for Enterprise provides a knowledge base of real-world adversary behavior targeting traditional enterprise networks. ATT&CK for Enterprise covers the following platforms: Windows, macOS, Linux, PRE, Office 365, Google Workspace, IaaS, Network, and Containers.", + … + }, + { + "id": "attack-pattern--0042a9f5-f053-4769-b3ef-9ad018dfa298", + "type": "attack-pattern", + "name": "Extra Window Memory Injection", + "description": "Adversaries may inject malicious code..." , + "external_references": [ + { + "source_name": "mitre-attack", + "external_id": "T1055.011", + "url": "https://attack.mitre.org/techniques/T1055/011" + }, + { + "url": "https://msdn.microsoft.com/library/windows/desktop/ms633574.aspx", + "description": "Microsoft. (n.d.). About Window Classes. Retrieved December 16, 2017.", + "source_name": "Microsoft Window Classes" + },… + ``` -To extract STIX objects with its type, its label, its description and its references, we need to navigate via a root object of type "bundle" before touching the STIX objects. Each object has an ID, we suppose unique in all Mitre datasets to generate IRI of all objects. We use your prefix ctia to build the class name and the properties of your RDFS vocabulary. Here, we build the vocabulary of manner agile for your use case because Mitre had not proposed a RDFS vocabulary for its datasets. + To extract STIX objects with its type, its label, its description and its references, we need to navigate via a root object of type "bundle" before touching the STIX objects. Each object has an ID, we suppose unique in all Mitre datasets to generate IRI of all objects. 
We use your prefix ctia to build the class name and the properties of your RDFS vocabulary. Here, we build the vocabulary of manner agile for your use case because Mitre had not proposed a RDFS vocabulary for its datasets. 4. Create the root object and give it an unique IRI: -* RDF type: ctia:Object -* IRI pattern: + * RDF type: ctia:Object + * IRI pattern: -![](23-1-extract-root-object.gif) + ![](23-1-extract-root-object.gif) -!!! Tip + !!! Tip - You can develop an IRI from scratch in the IRI formula editor, like here or directly in the form and improve it after, if necessary (see an example in the next step). + You can develop an IRI from scratch in the IRI formula editor, like here or directly in the form and improve it after, if necessary (see an example in the next step). - The important is to test the result in the evaluation view. + The important is to test the result in the evaluation view. -!!! Success + !!! Success - During the development of a transformer, you can test your transformation and check all the steps. + During the development of a transformer, you can test your transformation and check all the steps. - ![](23-1-see-steps-during-a-transformation.png) + ![](23-1-see-steps-during-a-transformation.png) 5. Link the sub-objects to their root: -* Value path: objects + * Value path: objects -with their IRI and the property ctia:object: + with their IRI and the property ctia:object: -* RDF property: ctia:object -* RDF type: ctia:Object -* IRI pattern: + * RDF property: ctia:object + * RDF type: ctia:Object + * IRI pattern: -![](23-1-extract-objects.gif) + ![](23-1-extract-objects.gif) -!!! Tip + !!! Tip - The RDFS classes start by an uppercase and the property by a lowercase and apply the camel case notation, if possible. The objective is to create cool IRI, ie. lisible IDs for humans and unique on the Web. + The RDFS classes start by an uppercase and the property by a lowercase and apply the camel case notation, if possible. 
The objective is to create cool IRI, ie. lisible IDs for humans and unique on the Web. - There are exceptions, like Wikidata which prefers to use a number for their IRI but with a explicit label in all languages. + There are exceptions, like Wikidata which prefers to use a number for their IRI but with a explicit label in all languages. - Moreover, if there is no clear ontology in your domain, the best is to take the name of parameters of the source (here json). So, we will use the property, like `ctia:external_id` with underscore because it's the convention of Mitre in its datasets. If Mitre defines a best RDF ontology, we will modify simply your transformer to respect their new ontology. + Moreover, if there is no clear ontology in your domain, the best is to take the name of parameters of the source (here json). So, we will use the property, like `ctia:external_id` with underscore because it's the convention of Mitre in its datasets. If Mitre defines a best RDF ontology, we will modify simply your transformer to respect their new ontology. -!!! Tip + !!! Tip - We could limit the number of objects to import, if you add conditions in the formula editor with the field "type" of objects, for example. + We could limit the number of objects to import, if you add conditions in the formula editor with the field "type" of objects, for example. 6. 
Extract now their type, label and description with these properties for example: -* ctia:type - * RDF type: URI - * Via the "value forma editor" create the IRI: `https://github.com/mitre/cti/blob/master/USAGE.md#{type}` -* rdfs:label - * value path: name - * RDF type: String -* ctia:description - * value path: description - * RDF type: String + * ctia:type + * RDF type: URI + * Via the "value forma editor" create the IRI: `https://github.com/mitre/cti/blob/master/USAGE.md#{type}` + * rdfs:label + * value path: name + * RDF type: String + * ctia:description + * value path: description + * RDF type: String -![](23-1-extract-properties.gif) + ![](23-1-extract-properties.gif) -!!! Tip + !!! Tip - STIX type doesn't apply the camel case and doesn't start by an uppercase. We prefers to create a specific property ctia:type for this reason. + STIX type doesn't apply the camel case and doesn't start by an uppercase. We prefers to create a specific property ctia:type for this reason. - You can reuse a vocabulary already in Corporate Memory (like rdfs) but you are also free to develop a new vocabulary on the fly with your prefixes. + You can reuse a vocabulary already in Corporate Memory (like rdfs) but you are also free to develop a new vocabulary on the fly with your prefixes. -!!! Success + !!! Success - When you test your transformer, you can see the future instances in your knowledge graph: - ![](23-1-success-transformer.png) + When you test your transformer, you can see the future instances in your knowledge graph: + ![](23-1-success-transformer.png) 7. At the end of the last step, we saw the dataset uses the syntax of Markdown to define a Web link. In the interface of SPLUNK, we need to use the HTML syntax. Modify the formula for the description with the operator "regex replace". -* Regex: `\[([^\[\]]*)\]\(([^\(\)]*)\)` -* Replace: `$1` + * Regex: `\[([^\[\]]*)\]\(([^\(\)]*)\)` + * Replace: `$1` -![](23-1-regex-replace.gif) + ![](23-1-regex-replace.gif) -!!! 
Success - In the "value formula editor", you can immediatly check the result of your formula. - ![](23-1-regex-replace.png) + !!! Success + In the "value formula editor", you can immediatly check the result of your formula. + ![](23-1-regex-replace.png) -!!! Tip + !!! Tip - At any moment, you will modify your vocabulary according to your needs that you will find during your development. You need to modify this transformer and relaunch all your workflows which use this transformer. + At any moment, you will modify your vocabulary according to your needs that you will find during your development. You need to modify this transformer and relaunch all your workflows which use this transformer. -!!! Tip + !!! Tip - The regular expression are often necessary in the components of "value formula editor". The website [regex101](https://regex101.com/) will help you to develop and debug the regular expressions. + The regular expression are often necessary in the components of "value formula editor". The website [regex101](https://regex101.com/) will help you to develop and debug the regular expressions. 8. Via the same method, we are linking the references objects to their STIX objects: -* via the property: `ctia:external_references` -* Type: ctia:Reference -* value path: external_references -* IRI of each object: its own URL () + * via the property: `ctia:external_references` + * Type: ctia:Reference + * value path: external_references + * IRI of each object: its own URL () -ctia:Reference object has these properties: + ctia:Reference object has these properties: -* ctia:source_name -* ctia:description -* ctia:url -* ctia:external_id + * ctia:source_name + * ctia:description + * ctia:url + * ctia:external_id -![](23-1-extract-references.gif) + ![](23-1-extract-references.gif) -!!! Tip + !!! Tip - Sometimes, several urls are not correct. You can use the component "Fix URI" to fix the classic problems. + Sometimes, several urls are not correct. 
You can use the component "Fix URI" to fix the classic problems. - ![](23-2-fix-url.png) + ![](23-2-fix-url.png) -!!! Warning + !!! Warning - When you make a transformer on a dataset, you see quickly the limit of data. For example with Mitre, several references are a set of citations without URL. + When you make a transformer on a dataset, you see quickly the limit of data. For example with Mitre, several references are a set of citations without URL. - ![](23-1-warning-bad-uri.png) + ![](23-1-warning-bad-uri.png) - For example references with this description: `(Citation: Palo Alto menuPass Feb 2017)(Citation: DOJ APT10 Dec 2018)(Citation: District Court of NY APT10 Indictment December 2018)` + For example references with this description: `(Citation: Palo Alto menuPass Feb 2017)(Citation: DOJ APT10 Dec 2018)(Citation: District Court of NY APT10 Indictment December 2018)` - The URL for the majority of citations can be found in the dataset but we need to do a first pass before to link correctly the citations at their URL. + The URL for the majority of citations can be found in the dataset but we need to do a first pass before to link correctly the citations at their URL. - Moreover, we can find also citation directly in the description of several objects but without URL and without their references in their JSON tree. + Moreover, we can find also citation directly in the description of several objects but without URL and without their references in their JSON tree. - Here, it's a simple tutorial. So, we do not try to fix this problem of citations for the moment, but if you want a tutorial to fix it, let me a comment in this page. + Here, it's a simple tutorial. So, we do not try to fix this problem of citations for the moment, but if you want a tutorial to fix it, let me a comment in this page. -!!! Success + !!! Success - To test your transformer, you need to develop one or several SPARQL queries with the RDF pattern which will use in your use case. 
You are developing this query in the SPARQL editor: + To test your transformer, you need to develop one or several SPARQL queries with the RDF pattern which will use in your use case. You are developing this query in the SPARQL editor: - ```sparql - #Test 1 transformer STIX 2.1 + ```sparql + #Test 1 transformer STIX 2.1 - PREFIX rdfs: - PREFIX ctia: + PREFIX rdfs: + PREFIX ctia: - SELECT - ?title ?description - (GROUP_CONCAT(?link; separator="
") as ?references) - FROM - WHERE { - { - ?resource ctia:type ctia:course-of-action . - } union { - ?resource ctia:type ctia:attack-pattern . - } + SELECT + ?title ?description + (GROUP_CONCAT(?link; separator="
") as ?references) + FROM + WHERE { + { + ?resource ctia:type ctia:course-of-action . + } union { + ?resource ctia:type ctia:attack-pattern . + } - ?resource rdfs:label ?title ; - ctia:description ?description ; - ctia:external_references ?mitre_url . + ?resource rdfs:label ?title ; + ctia:description ?description ; + ctia:external_references ?mitre_url . - ?mitre_url ctia:external_id "T1490" ; - ctia:source_name "mitre-attack" . + ?mitre_url ctia:external_id "T1490" ; + ctia:source_name "mitre-attack" . - OPTIONAL { - ?resource ctia:external_references [ - ctia:url ?reference_url ; - ctia:source_name ?reference_label ; - ctia:description ?reference_description - ] . - BIND( CONCAT("",?reference_label,": ",?reference_description ,"") as ?link) - } - } - GROUP BY ?title ?description - ``` + OPTIONAL { + ?resource ctia:external_references [ + ctia:url ?reference_url ; + ctia:source_name ?reference_label ; + ctia:description ?reference_description + ] . + BIND( CONCAT("",?reference_label,": ",?reference_description ,"") as ?link) + } + } + GROUP BY ?title ?description + ``` - ![](23-1-sparql-query.gif) + ![](23-1-sparql-query.gif) 9. During the building of interfaces, we saw the same MITRE ID of IoC rules is used by the concepts of tactic, mitigation, technique,... In the final interface, we will print properly the label of each concept for the same Mitre ID, like "Technique TXX" or "Mitigation TXX". -!!! Tip + !!! Tip - Moreover, Corporate Memory indexes some specific properties automatically, like rdfs:label. Without this property, it's not easy to find the objects by a search by text. To facilite the research of references, like the mitre id, you are adding the property rdfs:label to reference objects. + Moreover, Corporate Memory indexes some specific properties automatically, like rdfs:label. Without this property, it's not easy to find the objects by a search by text. 
To facilite the research of references, like the mitre id, you are adding the property rdfs:label to reference objects. -So, we add a new property `rdfs:label` to object `ctia:Reference`. If the reference is not a Mitre ID, we will copy the source_name else we will extract the type of concept in the URL and concat his Mitre ID: + So, we add a new property `rdfs:label` to object `ctia:Reference`. If the reference is not a Mitre ID, we will copy the source_name else we will extract the type of concept in the URL and concat his Mitre ID: -* In the transformer STIX, add the property rdfs:label (type string) to object `ctia:Reference`. + * In the transformer STIX, add the property rdfs:label (type string) to object `ctia:Reference`. -![](rdf-model-course-of-action.png) + ![](rdf-model-course-of-action.png) -* Customize the value of label, like in this RDF model: (try to do this rule alone before to look at this possible response) + * Customize the value of label, like in this RDF model: (try to do this rule alone before to look at this possible response) -![](23-1-extract-rdfslabel.png) + ![](23-1-extract-rdfslabel.png) -!!! Success + !!! Success - You can test the result when you search the Mitre ID via the explorer of knowledge graph "MA Entreprise": + You can test the result when you search the Mitre ID via the explorer of knowledge graph "MA Entreprise": - + ![](23-1-success-extract-rdfslabel.png) ### Create a workflow @@ -414,20 +414,20 @@ You have now a STIX transformer. We are building here a workflow to apply this t 1. Create a workflow with a name, for example "MITRE ATT&CK® workflow" 2. Insert the input JSON dataset 3. Insert the output RDF dataset -3. Insert the transformer -4. Link the three components -5. Execute the workflow to test it -6. Save it +4. Insert the transformer +5. Link the three components +6. Execute the workflow to test it +7. Save it ![](23-1-create-workflow.gif) -7. Do the same operations for the two other datasets. +8. 
Do the same operations for the two other datasets. -!!! Success + !!! Success - At the end, the workflow looks like that: + At the end, the workflow looks like that: - ![](23-1-success-worflow.png) + ![](23-1-success-worflow.png) ### Create a global named graph @@ -443,69 +443,69 @@ We are showing the "SPARQL tasks", another important feature available in Corpor 1. Create a "Knowledge Graph" dataset (ie, a RDF dataset) - * Label: MITRE ATT&CK® (knowledge graph) - * URI (name of graph): - * Enable "Clear graph before workflow execution" + * Label: MITRE ATT&CK® (knowledge graph) + * URI (name of graph): + * Enable "Clear graph before workflow execution" 2. Create a "SPARQL Update query" task without missing to enable the Jinja Template - * Label: Import graph - -```sparql -PREFIX owl: - -INSERT DATA { - GRAPH $outputProperties.uri("graph") { - $outputProperties.uri("graph") - owl:imports $inputProperties.uri("graph") . - } -} -``` - -!!! Note - - In this query, Jinja replace $outputProperties.uri("graph") and $inputProperties.uri("graph") according to our workflow so the final code executed of this query is, for example: + * Label: Import graph ```sparql PREFIX owl: INSERT DATA { - GRAPH { - - owl:imports . + GRAPH $outputProperties.uri("graph") { + $outputProperties.uri("graph") + owl:imports $inputProperties.uri("graph") . } } ``` -!!! Success + !!! Note - In the Turtle view of RDF dataset "MITRE ATT&CK®", you can see the triples inserted by your SPARQL query. + In this query, Jinja replace $outputProperties.uri("graph") and $inputProperties.uri("graph") according to our workflow so the final code executed of this query is, for example: - ```turtle - - owl:imports ; - owl:imports ; - owl:imports - . - ``` + ```sparql + PREFIX owl: + + INSERT DATA { + GRAPH { + + owl:imports . + } + } + ``` + + !!! Success + + In the Turtle view of RDF dataset "MITRE ATT&CK®", you can see the triples inserted by your SPARQL query. -1. 
In the same workflow add one SPARQL task for each RDF datasets and in output add the RDF dataset "MITRE ATT&CK®". Execute it and save it. + ```turtle + + owl:imports ; + owl:imports ; + owl:imports + . + ``` + +3. In the same workflow add one SPARQL task for each RDF datasets and in output add the RDF dataset "MITRE ATT&CK®". Execute it and save it. ![](23-1-sparql-task.gif) -!!! Success + !!! Success - ![](23-1-workflow-import.png) + ![](23-1-workflow-import.png) - In the Turtle view of RDF dataset "MITRE ATT&CK®", you can see the triples inserted by your SPARQL query. + In the Turtle view of RDF dataset "MITRE ATT&CK®", you can see the triples inserted by your SPARQL query. - ```turtle - - owl:imports ; - owl:imports ; - owl:imports - . - ``` + ```turtle + + owl:imports ; + owl:imports ; + owl:imports + . + ``` ### Test your final SPARQL query @@ -563,67 +563,67 @@ Here, we are creating a new SPARQL Update task to calculate and insert automatic 1. In the same workflow, insert a new SPARQL Update task with this query to calculate the statistics: -* label: Calculate VoID + * label: Calculate VoID -```sparql -PREFIX rdfs: -PREFIX dcterms: -prefix void: - -INSERT -{ - GRAPH $outputProperties.uri("graph") { - $outputProperties.uri("graph") a void:Dataset; - rdfs:label "MITRE ATT&CK®"; - rdfs:comment "MITRE ATT&CK® is a globally-accessible knowledge base of adversary tactics and techniques based on real-world observations."; - void:triples ?triples ; - void:entities ?entities . - } -} -USING $outputProperties.uri("graph") -WHERE { + ```sparql + PREFIX rdfs: + PREFIX dcterms: + prefix void: + + INSERT { - SELECT (COUNT(DISTINCT ?resource) as ?entities) - WHERE { - ?resource a ?class . 
+ GRAPH $outputProperties.uri("graph") { + $outputProperties.uri("graph") a void:Dataset; + rdfs:label "MITRE ATT&CK®"; + rdfs:comment "MITRE ATT&CK® is a globally-accessible knowledge base of adversary tactics and techniques based on real-world observations."; + void:triples ?triples ; + void:entities ?entities . } } - { - SELECT (COUNT(?s) as ?triples) - WHERE { - ?s ?p ?o . + USING $outputProperties.uri("graph") + WHERE { + { + SELECT (COUNT(DISTINCT ?resource) as ?entities) + WHERE { + ?resource a ?class . + } + } + { + SELECT (COUNT(?s) as ?triples) + WHERE { + ?s ?p ?o . + } } } -} -``` + ``` -!!! Tip + !!! Tip - This query uses the variable `$outputProperties.uri("graph")` (Jinja template). If the name of graph changes, the code of the query stays stable in your workflow. + This query uses the variable `$outputProperties.uri("graph")` (Jinja template). If the name of graph changes, the code of the query stays stable in your workflow. -![](23-1-sparql-void.gif) + ![](23-1-sparql-void.gif) -!!! Success + !!! Success - The final triples in the graph `https://attack.mitre.org`after this worflow. + The final triples in the graph `https://attack.mitre.org`after this worflow. - ```turtle - prefix owl: - prefix rdf: - prefix rdfs: - prefix xsd: - - - rdf:type ; - rdfs:comment "MITRE ATT&CK® is a globally-accessible knowledge base of adversary tactics and techniques based on real-world observations." ; - rdfs:label "MITRE ATT&CK®" ; - 28081 ; - 150120 ; - owl:imports - , - , - . - ``` + ```turtle + prefix owl: + prefix rdf: + prefix rdfs: + prefix xsd: + + + rdf:type ; + rdfs:comment "MITRE ATT&CK® is a globally-accessible knowledge base of adversary tactics and techniques based on real-world observations." ; + rdfs:label "MITRE ATT&CK®" ; + 28081 ; + 150120 ; + owl:imports + , + , + . + ``` ### Refresh all automatically @@ -633,76 +633,76 @@ The datasets of Mitre are updated regularly. You may want to update them automat 2. 
Open your config file: -```bash -cmemc config edit -``` + ```bash + cmemc config edit + ``` 3. Insert your sandbox in your CMEMC config, example with a password grant type: -```bash -[johndo.eccenca.my] -CMEM_BASE_URI=https://johndo.eccenca.my/ -OAUTH_GRANT_TYPE=password -OAUTH_CLIENT_ID=cmemc -OAUTH_USER=johndo@example.com -OAUTH_PASSWORD=XXXXXXXXX -``` + ```bash + [johndo.eccenca.my] + CMEM_BASE_URI=https://johndo.eccenca.my/ + OAUTH_GRANT_TYPE=password + OAUTH_CLIENT_ID=cmemc + OAUTH_USER=johndo@example.com + OAUTH_PASSWORD=XXXXXXXXX + ``` -You need to replace "johndo" by other thing, "" by your login (email) in the sandbox and XXXXXXXXX by your password. Save the file (with VI, :wq). + You need to replace "johndo" by other thing, "" by your login (email) in the sandbox and XXXXXXXXX by your password. Save the file (with VI, :wq). -!!! Tip + !!! Tip - Immediatly, in the file ~/.bashrc, you can specify your sandbox like your instance by default for CMEMC with this line: + Immediatly, in the file ~/.bashrc, you can specify your sandbox like your instance by default for CMEMC with this line: - ```bash - export CMEMC_CONNECTION=johndo.eccenca.my - ``` + ```bash + export CMEMC_CONNECTION=johndo.eccenca.my + ``` -Test: + Test: -```bash -cmemc graph list -# or cmemc -c johndo.eccenca.my graph list -``` + ```bash + cmemc graph list + # or cmemc -c johndo.eccenca.my graph list + ``` -If you can connect it, you can see your knowledge graph "" in the list. + If you can connect it, you can see your knowledge graph "" in the list. -4. You need to know the IDs of your JSON datasets IDs and your workflow ID to implement the command lines with the tool [Corporate Memory Console]() ( +4. You need to know the IDs of your JSON datasets IDs and your workflow ID to implement the command lines with `cmemc`. 
-![](23-1-collect_IDs.gif) + ![](23-1-collect_IDs.gif) -For example in my demo the JSON datasets and the workflow have these IDs: + For example in my demo the JSON datasets and the workflow have these IDs: -``` -MITREATTCK_3dc114458dfd4c57:MAEntrepriseJSON_14f0f94ed5de5daa -MITREATTCK_3dc114458dfd4c57:MAICSJSON_e024c6433ed523e1 -MITREATTCK_3dc114458dfd4c57:MAMobileJSON_3f890442dad17750 + ```txt + MITREATTCK_3dc114458dfd4c57:MAEntrepriseJSON_14f0f94ed5de5daa + MITREATTCK_3dc114458dfd4c57:MAICSJSON_e024c6433ed523e1 + MITREATTCK_3dc114458dfd4c57:MAMobileJSON_3f890442dad17750 -MITREATTCK_3dc114458dfd4c57:MITREATTCKworkflow_0b8fa5454ef21a00 -``` + MITREATTCK_3dc114458dfd4c57:MITREATTCKworkflow_0b8fa5454ef21a00 + ``` 5. You can now import the file directly of Mitre repository on GitHub and import the files in the sandbox and execute your workflow. -```bash -wget https://raw.githubusercontent.com/mitre-attack/attack-stix-data/master/enterprise-attack/enterprise-attack.json -wget https://raw.githubusercontent.com/mitre-attack/attack-stix-data/master/mobile-attack/mobile-attack.json -wget https://raw.githubusercontent.com/mitre-attack/attack-stix-data/master/ics-attack/ics-attack.json - -cmemc dataset download --replace MITREATTCK_3dc114458dfd4c57:MAEntrepriseJSON_14f0f94ed5de5daa enterprise-attack.json -cmemc dataset download --replace MITREATTCK_3dc114458dfd4c57:MAMobileJSON_3f890442dad17750 mobile-attack.json -cmemc dataset download --replace MITREATTCK_3dc114458dfd4c57:MAICSJSON_e024c6433ed523e1 ics-attack.json -cmemc workflow execute --wait MITREATTCK_3dc114458dfd4c57:MITREATTCKworkflow_0b8fa5454ef21a00 -``` + ```bash + wget https://raw.githubusercontent.com/mitre-attack/attack-stix-data/master/enterprise-attack/enterprise-attack.json + wget https://raw.githubusercontent.com/mitre-attack/attack-stix-data/master/mobile-attack/mobile-attack.json + wget https://raw.githubusercontent.com/mitre-attack/attack-stix-data/master/ics-attack/ics-attack.json + + cmemc dataset 
download --replace MITREATTCK_3dc114458dfd4c57:MAEntrepriseJSON_14f0f94ed5de5daa enterprise-attack.json + cmemc dataset download --replace MITREATTCK_3dc114458dfd4c57:MAMobileJSON_3f890442dad17750 mobile-attack.json + cmemc dataset download --replace MITREATTCK_3dc114458dfd4c57:MAICSJSON_e024c6433ed523e1 ics-attack.json + cmemc workflow execute --wait MITREATTCK_3dc114458dfd4c57:MITREATTCKworkflow_0b8fa5454ef21a00 + ``` -!!! Success + !!! Success - You can see the result in the shell but also via the "Activities Board". It's useful to follow the errors of your workflows, if you execute a script via a Linux Cron, for example. + You can see the result in the shell but also via the "Activities Board". It's useful to follow the errors of your workflows, if you execute a script via a Linux Cron, for example. - ![](23-1-success-cmemc-activity.png) + ![](23-1-success-cmemc-activity.png) -!!! Tip + !!! Tip - With these command lines, you can now start a cron every day to check the Mitre updates and start refreshing your datasets. + With these command lines, you can now start a cron every day to check the Mitre updates and start refreshing your datasets. ## Exercices @@ -714,53 +714,53 @@ After this tutorial, you want probably to navigate in your new knowledge graph b 2. Create a new SPARQL Update task "convert STIX relationships to rdf statements" with this code: -```sparql -PREFIX ctia: + ```sparql + PREFIX ctia: -INSERT { - GRAPH $outputProperties.uri("graph") { - ?sourceIRI ?propertyIRI ?targetIRI . - } -} -WHERE { - GRAPH $inputProperties.uri("graph") { - ?relationship - ctia:type ctia:relationship ; - ctia:source_ref ?source ; - ctia:target_ref ?target ; - ctia:relationship_type ?property . + INSERT { + GRAPH $outputProperties.uri("graph") { + ?sourceIRI ?propertyIRI ?targetIRI . 
+ } } + WHERE { + GRAPH $inputProperties.uri("graph") { + ?relationship + ctia:type ctia:relationship ; + ctia:source_ref ?source ; + ctia:target_ref ?target ; + ctia:relationship_type ?property . + } - BIND (IRI(CONCAT("https://github.com/mitre-attack/attack-stix-data#",STR(?source))) as ?sourceIRI) - BIND (IRI(CONCAT("https://github.com/mitre/cti/blob/master/USAGE.md#",STR(?property))) as ?propertyIRI) - BIND (IRI(CONCAT("https://github.com/mitre-attack/attack-stix-data#",STR(?target))) as ?targetIRI) -} -``` + BIND (IRI(CONCAT("https://github.com/mitre-attack/attack-stix-data#",STR(?source))) as ?sourceIRI) + BIND (IRI(CONCAT("https://github.com/mitre/cti/blob/master/USAGE.md#",STR(?property))) as ?propertyIRI) + BIND (IRI(CONCAT("https://github.com/mitre-attack/attack-stix-data#",STR(?target))) as ?targetIRI) + } + ``` -This SPARQL query create explicitly the STIX links in the knowledge graph. Here, we create a new inference via a simple query. + This SPARQL query create explicitly the STIX links in the knowledge graph. Here, we create a new inference via a simple query. 3. Create a new Knowledge graph dataset "STIX inferences" with this IRI: -!!! Tip + !!! Tip - Separate always the facts extracted of raw data and the inferences calculate with other graphs. So, you can recalculate your inferences without rebuild all knowledge graph. + Separate always the facts extracted of raw data and the inferences calculate with other graphs. So, you can recalculate your inferences without rebuild all knowledge graph. 4. 
Split the workflow in two workflows: - * "Transform all STIX data to RDF" to calculate the inferences after RDF triples - ![](23-1-ex-workflow-STIX.png) + * "Transform all STIX data to RDF" to calculate the inferences after RDF triples + ![](23-1-ex-workflow-STIX.png) - * "Assemble the global knowledge graph", it will import all the graphs of projects - ![](23-1-ex-workflow-gen.png) + * "Assemble the global knowledge graph", it will import all the graphs of projects + ![](23-1-ex-workflow-gen.png) 5. Create a new workflow "MITRE ATT&CK® workflow" where you will insert the other workflows, like that: ![](23-1-ex-workflow-global.png) -!!! Success + !!! Success - You can now navigate in your first knowledge graph: - ![](23-1-ex-graph-navigation.gif) + You can now navigate in your first knowledge graph: + ![](23-1-ex-graph-navigation.gif) ### Reconcile automatically the STIX concepts via the Linking tasks @@ -774,8 +774,8 @@ This SPARQL query create explicitly the STIX links in the knowledge graph. Here, The Common Attack Pattern Enumeration and Classification (CAPEC™) effort provides a publicly available catalog of common attack patterns that helps users understand how adversaries exploit weaknesses in applications and other cyber-enabled capabilities. -* Dataset: [https://github.com/mitre/cti/blob/master/capec/2.1/stix-capec.json](https://github.com/mitre/cti/blob/master/capec/2.1/stix-capec.json) -* The CAPEC "ontology": [https://github.com/mitre/cti/blob/master/USAGE-CAPEC.md](https://github.com/mitre/cti/blob/master/USAGE-CAPEC.md) +* Dataset: [https://github.com/mitre/cti/blob/master/capec/2.1/stix-capec.json](https://github.com/mitre/cti/blob/master/capec/2.1/stix-capec.json) +* The CAPEC "ontology": [https://github.com/mitre/cti/blob/master/USAGE-CAPEC.md](https://github.com/mitre/cti/blob/master/USAGE-CAPEC.md) 1. Import the CAPEC dataset in Corporate Memory 2. 
Create the named graph of CAPEC @@ -788,8 +788,8 @@ STIX uses JSON syntax and can therefore be converted to RDF via Corporate Memory ## Ressources -* [RDF schemas (Model, pattern, etc)](RDF_model_and_pattern.drawio) -* [Archive of CMEM project](MITREATTCK_tutorial.zip) +* [RDF schemas (Model, pattern, etc)](RDF_model_and_pattern.drawio) +* [Archive of CMEM project](MITREATTCK_tutorial.zip) --- diff --git a/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-YAML-data-of-hayabusa-sigma/index.md b/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-YAML-data-of-hayabusa-sigma/index.md index 7aa2e66e0..e0acf66a4 100644 --- a/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-YAML-data-of-hayabusa-sigma/index.md +++ b/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-YAML-data-of-hayabusa-sigma/index.md @@ -8,8 +8,8 @@ There are rules for Host-based intrusion detection systems (HIDS) with Hayabusa/ Here, we are working with the Hayabusa/Sigma rules available via GitHub: -- [https://github.com/Yamato-Security/hayabusa-rules](https://github.com/Yamato-Security/hayabusa-rules) -- [https://github.com/SigmaHQ/sigma](https://github.com/Yamato-Security/hayabusa-rules) +- [https://github.com/Yamato-Security/hayabusa-rules](https://github.com/Yamato-Security/hayabusa-rules) +- [https://github.com/SigmaHQ/sigma](https://github.com/Yamato-Security/hayabusa-rules) The problem of interoperability, here, is the YAML format of files, their random position in their folders in their Github projets. Moreover, the same rule can exist in different projects but in this tutorial, we will not fix this problem and we consider the IRI rule is their Web address. In Corporate Memory, we would fix that with the Linked Tool, we will study this tool in a next part of this tutorial. 
@@ -113,26 +113,26 @@ This new transformer are building the following RDF model for your use case: Rule object: -- type: `ctis:Rule` +- type: `ctis:Rule` -- IRI: concatenation of "" with the result of this regular expression `^.*?([^\/]*)$` on the rule path +- IRI: concatenation of "" with the result of this regular expression `^.*?([^\/]*)$` on the rule path ![](23-1-iri-rule.png) -- property `ctis:filename` with the result of this regular expression `^.*?([^\/]*)$` on the value path `rulePath` -- property `rdfs:label` with the value path `title` -- property `rdfs:comment` with the value path `description` -- property `rdfs:seeAlso` with the value path `references` -- property `ctis:mitreAttackTechniqueId` is building with this formula with the value path `tags` - - Filter by regex: `^attack\.t\d+$` - - Regex replace `attack\.t` by `T` +- property `ctis:filename` with the result of this regular expression `^.*?([^\/]*)$` on the value path `rulePath` +- property `rdfs:label` with the value path `title` +- property `rdfs:comment` with the value path `description` +- property `rdfs:seeAlso` with the value path `references` +- property `ctis:mitreAttackTechniqueId` is building with this formula with the value path `tags` + - Filter by regex: `^attack\.t\d+$` + - Regex replace `attack\.t` by `T` ![](23-1-formula-mitreid.png) -- property `rdfs:isDefinedBy` on the value path `rulePath` is building with this formula to link the rules to their Web addresses. - - Add two "Regex replace" - - replace `\./hayabusa-rules/` by `https://github.com/Yamato-Security/hayabusa-rules/blob/main/` - - replace `\./sigma/` by `https://github.com/SigmaHQ/sigma/blob/master/` +- property `rdfs:isDefinedBy` on the value path `rulePath` is building with this formula to link the rules to their Web addresses. 
+ - Add two "Regex replace" + - replace `\./hayabusa-rules/` by `https://github.com/Yamato-Security/hayabusa-rules/blob/main/` + - replace `\./sigma/` by `https://github.com/SigmaHQ/sigma/blob/master/` ![](23-1-rules-isdefinedby.png) @@ -159,7 +159,7 @@ And don't forget to allow the replacement of JSON dataset because it allows to r ![](23-1-add-worflow.gif) -7. Copy the workflow ID +1. Copy the workflow ID ![](23-1-id-worflow.gif) @@ -232,9 +232,9 @@ Here, we learnt how to generate a knowledge graph with files in input with Corpo ## Ressources -- [RDF schemas (Model, pattern, etc)](RDF_model_and_pattern.drawio) -- [script 1](importRules.sh) -- [script 2](importRules2.sh) +- [RDF schemas (Model, pattern, etc)](RDF_model_and_pattern.drawio) +- [script 1](importRules.sh) +- [script 2](importRules2.sh) --- diff --git a/docs/build/tutorial-how-to-link-ids-to-osint/link-IDS-event-to-KG-via-cmem/index.md b/docs/build/tutorial-how-to-link-ids-to-osint/link-IDS-event-to-KG-via-cmem/index.md index 84b024c6c..cc9e47626 100644 --- a/docs/build/tutorial-how-to-link-ids-to-osint/link-IDS-event-to-KG-via-cmem/index.md +++ b/docs/build/tutorial-how-to-link-ids-to-osint/link-IDS-event-to-KG-via-cmem/index.md @@ -6,9 +6,10 @@ In this tutorial, we are using the Splunk app "Investigate lateral movements wit In the demo of this Splunk app via the video 1, the user selects the data about one investigation via Splunk and generate a bash script to export these data via the Splunk API in tempory graphs in Corporate Memory for each investigation. -![](splunk-app-demo-poc-app.gif) - -*Video 1: Splunk dashboards of the Splunk app "Investigate lateral movements with a knowledge graph"* +
+![Video 1: Splunk dashboards of the Splunk app "Investigate lateral movements with a knowledge graph"](splunk-app-demo-poc-app.gif) +
Video 1: Splunk dashboards of the Splunk app "Investigate lateral movements with a knowledge graph"
+
In this page, we are showing how, we : @@ -27,7 +28,7 @@ This app is not directly connected to your Corporate Memory instance. The custom Position of these scripts in the folders of this app: -``` +```shell +---bin | Investigation.py +---cmem @@ -41,43 +42,49 @@ Moreover, a settings file is necessary to insert the credentials of Splunk (like In this Proof of Concept, we have implemented two types of investigation: -- high-level with the data of alerts of Zeek and Hayabusa/Sigma -- low-level with the data of Suricata and Sysmon +- high-level with the data of alerts of Zeek and Hayabusa/Sigma +- low-level with the data of Suricata and Sysmon For each investigation, an analyst selects and navigates in the data with two dashboards for each type of investigation: -- one dashboard (see figure 1) to select the data to transfer to knowledge graph: typeA_request.xml -- one dashboard (see figure 2,3,4) to navigate in the knowledge graph: typeA_dashboard.xml - -![](poc-app_request.png) - -*Figure 1: Splunk dashboard to select the data before executing a high-level investigation* +- one dashboard (see figure 1) to select the data to transfer to knowledge graph: typeA_request.xml +- one dashboard (see figure 2,3,4) to navigate in the knowledge graph: typeA_dashboard.xml -![](poc-app_high_level_computers.png) +
+![Figure 1: Splunk dashboard to select the data before executing a high-level investigation](poc-app_request.png) +
Figure 1: Splunk dashboard to select the data before executing a high-level investigation
+
-*Figure 2: High-level investigation dashboard with the list of computers implicated in the incident* +
+![Figure 2: High-level investigation dashboard with the list of computers implicated in the incident](poc-app_high_level_computers.png) +
Figure 2: High-level investigation dashboard with the list of computers implicated in the incident
+
-![](poc-app_high_level_period.png) +
+![Figure 3: High-level investigation dashboard with the panel to select a specific period during an incident according to IoCs details](poc-app_high_level_period.png) +
Figure 3: High-level investigation dashboard with the panel to select a specific period during an incident according to IoCs details
+
-*Figure 3: High-level investigation dashboard with the panel to select a specific period during an incident according to IoCs details* - -![](poc-app_low_level.png) - -*Figure 4: low-level investigation dashboard contains the command lines of Windows processus rised Suricata alerts during the period selected by the analyst in a high-level investigation dashboard.* +
+![Figure 4: low-level investigation dashboard contains the command lines of Windows processus rised Suricata alerts during the period selected by the analyst in a high-level investigation dashboard](poc-app_low_level.png) +
Figure 4: low-level investigation dashboard contains the command lines of Windows processus rised Suricata alerts during the period selected by the analyst in a high-level investigation dashboard
+
The idea is the analyst can do an investigation high-level without using a lot of ressources (a little graph) but when he want to see the suspicious processus on one computer in a specific period, he can ask an investigation low-level with a maximum of details. To follow the calculation of investigations and free memory when one investigation is closed, we developed another dashboard "investigation_list.xml" (see figure 5). This dashboard prints the status of investigations actually in the knowledge graph. Here, each investigation is saved in tempory graphs and the analyst can create and delete them directly in Splunk. -![](poc-app_list_investigations.png) - -*Figure 5: The dashboard "investigation list" shows all the tempory graph actually in the knowledge graph. The analyst can open an investigation, see the SPL query generated when he has created an investigation and delete it when he want.* +
+![Figure 5: The dashboard "investigation list" shows all the tempory graph actually in the knowledge graph. The analyst can open an investigation, see the SPL query generated when he has created an investigation and delete it when he want.](poc-app_list_investigations.png) +
Figure 5: The dashboard "investigation list" shows all the tempory graph actually in the knowledge graph. The analyst can open an investigation, see the SPL query generated when he has created an investigation and delete it when he want.
+
With these interfaces to manage and calculate different investigations with different levels of details, we imagined a first method to "follow lateral movements" (see figure 6) in order to understand the objectives of the incident. We hope this PoC will "Accelerate Cyber Threat Hunting". -![](poc-app_hunt.png) - -*Figure 6: Analyst can select a computer and a period to analyze the suspicious processus implicated in Suricata alerts. So, an analyst can follow the "lateral movements" and see the command lines executed by these suspicious processus.* +
+![Figure 6: Analyst can select a computer and a period to analyze the suspicious processus implicated in Suricata alerts. So, an analyst can follow the "lateral movements" and see the command lines executed by these suspicious processus.](poc-app_hunt.png) +
Figure 6: Analyst can select a computer and a period to analyze the suspicious processus implicated in Suricata alerts. So, an analyst can follow the "lateral movements" and see the command lines executed by these suspicious processus.
+
## Manage the graphs of your application @@ -97,9 +104,9 @@ This manner to manage the graphs has been applied in this app, so an analyst is This token contains a json object where: -- "index" the list of splunk indexes of the SPL query -- "search" the second part of the SPL query -- "workflowID" the ID of workflow in Corporate Memory to convert the raw data of Splunk in RDF in a tempory graph +- "index" the list of splunk indexes of the SPL query +- "search" the second part of the SPL query +- "workflowID" the ID of workflow in Corporate Memory to convert the raw data of Splunk in RDF in a tempory graph An analyst can import as many sources as needed with several tokens, ie. "source_1", "source_2", etc. @@ -132,8 +139,8 @@ There are not consensus about the manner to calculate inferences on the RDF data We use two manners to calculate new inferences: -- when the inference is simple to calculate with SPARQL, we use a SPARQL update query with parameters in Corporate Memory (like "inferenceID" to build the name of destination tempory graph). "queryIRI" is the IRI of the query in the catalog of Corporate Memory. -- when the inference is complex to calculate, we use a workflow of Corporate Memory. +- when the inference is simple to calculate with SPARQL, we use a SPARQL update query with parameters in Corporate Memory (like "inferenceID" to build the name of destination tempory graph). "queryIRI" is the IRI of the query in the catalog of Corporate Memory. +- when the inference is complex to calculate, we use a workflow of Corporate Memory. With these tokens "source_1", "source_2", etc and "inferences" in the dashboard, the app can generate a bash script for CMEMC. 
diff --git a/docs/build/tutorial-how-to-link-ids-to-osint/link-IDS-event-to-KG/index.md b/docs/build/tutorial-how-to-link-ids-to-osint/link-IDS-event-to-KG/index.md index 98d7fabc4..776a74185 100644 --- a/docs/build/tutorial-how-to-link-ids-to-osint/link-IDS-event-to-KG/index.md +++ b/docs/build/tutorial-how-to-link-ids-to-osint/link-IDS-event-to-KG/index.md @@ -6,9 +6,10 @@ In this tutorial, we are using the Linked Data App for Splunk. This app contains In the demo of this app in the video 1, the user selects the indexes of his investigation and select an alert message to open its sources on the Web before searching manually via the Splunk interfaces. Splunk, automatically, refreshes the SPARQL queries in the dashboard after each interaction of user. -![](splunk-app-demo-LD-app.gif) - -*Video 1: Splunk dashboards of the Linked Data App* +
+![Video 1: Splunk dashboards of the Linked Data App](splunk-app-demo-LD-app.gif) +
Video 1: Splunk dashboards of the Linked Data App
+
In this tutorial, we learn to: @@ -25,15 +26,17 @@ The "Linked Data App" extends Splunk Search Processing Language (SPL) to support 2. Open the App window in Splunk via the icon "tools" (see figure 1) -![](splunk_apps_menu.png) - -*Figure 1: In the top of the list of installed Splunk apps, you need to click on the icon "tools" to open the window to manage your apps* - -3. Upload the app in Splunk (see video 2) +
+![Figure 1: In the top of the list of installed Splunk apps, you need to click on the icon "tools" to open the window to manage your apps](splunk_apps_menu.png) +
Figure 1: In the top of the list of installed Splunk apps, you need to click on the icon "tools" to open the window to manage your apps
+
-![](splunk-app-install.gif) +1. Upload the app in Splunk (see video 2) -*Video 2: When the tar.gz of the "Linked Data App", you can upload it manually directly in Splunk.* +
+![Video 2: When the tar.gz of the "Linked Data App", you can upload it manually directly in Splunk](splunk-app-install.gif) +
Video 2: When the tar.gz of the "Linked Data App", you can upload it manually directly in Splunk
+
!!! Tip @@ -61,7 +64,7 @@ vi default/settings.conf You have an example of configuration for the eccenca sandbox SPARQL endpoint in the file `default/settings_template_sandbox.conf` (and another example via Oauth2 secret ID in the file `default/settings_template_oauth_secret_id.conf`). -2. Insert your credentials in the the file `settings.conf`, ie. replace `johndo` by the name of your sandbox (endpointRead, token_endpoint), `johndo@example.com` by your email and `XXXXXXXXX` by your password. Don't change the parameters OAUTH_CLIENT_ID and OAUTH_GRANT_TYPE. +1. Insert your credentials in the the file `settings.conf`, ie. replace `johndo` by the name of your sandbox (endpointRead, token_endpoint), `johndo@example.com` by your email and `XXXXXXXXX` by your password. Don't change the parameters OAUTH_CLIENT_ID and OAUTH_GRANT_TYPE. ```ini [config:default] @@ -78,11 +81,11 @@ OAUTH_USER=johndo@example.com OAUTH_PASSWORD=XXXXXXXXX ``` -3. Restart after your Splunk instance (via the administration windows) +1. Restart after your Splunk instance (via the administration windows) -4. Test your sandbox endpoint in Splunk with this SPL query: +2. Test your sandbox endpoint in Splunk with this SPL query: -``` +```spl | sparql query=" select * @@ -104,7 +107,7 @@ endpointRead=https://query.wikidata.org/sparql Restart after your Splunk instance and request in Splunk your endpoint with the parameter config (here wikidata) to select the config to use in the file `settings.conf`: -``` +```spl | sparql config="wikidata" query=" @@ -124,9 +127,10 @@ Restart after your Splunk instance and request in Splunk your endpoint with the To work, our example of dashboard need to have Splunk indexes of IoCs. We cannot share our indexes but you can modify our example with your own SPL queries according to your Splunk indexes. -![](demo_ld.png) - -*Figure 2: Dashboard with SPARQL commands and the script `table_html.js` to print the HTML and to open Web pages of alerts' references* +
+![Figure 2: Dashboard with SPARQL commands and the script `table_html.js` to print the HTML and to open Web pages of alerts' references](demo_ld.png) +
Figure 2: Dashboard with SPARQL commands and the script `table_html.js` to print the HTML and to open Web pages of alerts' references
+
!!! Tip diff --git a/docs/build/variables/index.md b/docs/build/variables/index.md index 0aafd594c..7ffeca6cc 100644 --- a/docs/build/variables/index.md +++ b/docs/build/variables/index.md @@ -13,17 +13,17 @@ These variables define various aspects of the integration tasks, such as the sou The variables are not technically typed. They can be used in most Build configuration and input fields that take inputs of the following data types: -- simple text/string parameters (any string), -- integer parameters (any integer), -- and boolean values (`true`/`false`). +- simple text/string parameters (any string), +- integer parameters (any integer), +- and boolean values (`true`/`false`). Two kinds of variables can be defined: -**Global variables** +`Global variables` : It is defined by the administrator in the configuration file at deployment time and cannot be set by a normal user. -**Project variables (User-defined)** +`Project variables (User-defined)` : It is defined by the user in the UI. Project variables can only be used in the same project. @@ -189,4 +189,3 @@ Click on the symbol **{#}** it turns blue in color. It means the variable's feat In order to allow the automation of activities with build variables from external processes, the Corporate Memory command line interfaces cmemc has a dedicated [`project variable` command group](../../automate/cmemc-command-line-interface/command-reference/project/variable/index.md) for this. Please have a look at command group documentation to learn how to use these commands. 
- diff --git a/docs/build/workflow-reconfiguration/index.md b/docs/build/workflow-reconfiguration/index.md index 80e300ae7..1321f61e5 100644 --- a/docs/build/workflow-reconfiguration/index.md +++ b/docs/build/workflow-reconfiguration/index.md @@ -57,4 +57,3 @@ After this is done, you can reconfigure any workflow operator that uses this par Tutorials that showcase this function in an application context: - [Loading JDBC datasets incrementally](../loading-jdbc-datasets-incrementally/index.md) - diff --git a/docs/consume/consume-graphs-in-apache-kafka/index.md b/docs/consume/consume-graphs-in-apache-kafka/index.md index 69b54dd9d..a0e6d165e 100644 --- a/docs/consume/consume-graphs-in-apache-kafka/index.md +++ b/docs/consume/consume-graphs-in-apache-kafka/index.md @@ -41,4 +41,3 @@ Once you installed the package, you can use the Kafka Producer by simply creatin ![Create new Item and search for `kafka`](create-new-item-kafka.png "Create new Item and search for `kafka`") Follow the in-app documentation on how to configure the task (e.g. for providing credentials or preparing data to be sent in messages). 
- diff --git a/docs/consume/consuming-graphs-in-power-bi/index.md b/docs/consume/consuming-graphs-in-power-bi/index.md index d21d2c33d..68f2d7a8b 100644 --- a/docs/consume/consuming-graphs-in-power-bi/index.md +++ b/docs/consume/consuming-graphs-in-power-bi/index.md @@ -18,14 +18,14 @@ The latest (unsigned) version of our Power-BI-Connector is available from its so - [eccenca github.com repository](https://github.com/eccenca/power-bi-connector/tags) (unsigned .mez file)![release](https://img.shields.io/github/release-date/eccenca/power-bi-connector?style=plastic) ![tag](https://img.shields.io/github/v/tag/eccenca/power-bi-connector?style=plastic) - [eccenca Corporate Memory Releases](https://releases.eccenca.com/power-bi-connector/) (signed .pqx file) - - Thumbprint of the signature: **FB6C562BD0B08107AAA420EDDE94507420C7FE1A** + - Thumbprint of the signature: **FB6C562BD0B08107AAA420EDDE94507420C7FE1A** ## Installation - Download the `.pqx` or `.mez` file from the locations linked above. - Move the file into the folder `Documents\Power BI Desktop\Custom Connectors` . - - Create the folder if it does not exist. - - In case you are running Windows on Parallels Desktop: Do not use the Local `Disk\Users\UserName\Documents` folder but your shared folder with macOS. + - Create the folder if it does not exist. + - In case you are running Windows on Parallels Desktop: Do not use the Local `Disk\Users\UserName\Documents` folder but your shared folder with macOS. - Register the Thumbprint (for .pqx) or setup PowerBI Desktop to allow any 3rd party connector (for .pqx or .mez) *(we recommend to register the Thumbprint)* ??? 
Setup diff --git a/docs/consume/consuming-graphs-with-redash/index.md b/docs/consume/consuming-graphs-with-redash/index.md index b3aa23873..331f716ff 100644 --- a/docs/consume/consuming-graphs-with-redash/index.md +++ b/docs/consume/consuming-graphs-with-redash/index.md @@ -40,4 +40,3 @@ To get familiar with Redash, please have a look at the Redash user guide, especi In order to query eccenca Corporate Memory data sources in Redash, you have to formulate your query with SPARQL: ![Redash: Query with SPARQL](3-edit-query.png "Redash: Query with SPARQL") - diff --git a/docs/consume/index.md b/docs/consume/index.md index b2f5fd708..6e4278ee9 100644 --- a/docs/consume/index.md +++ b/docs/consume/index.md @@ -17,44 +17,42 @@ Since not all applications allow the direct use of SPARQL, this section includes **:octicons-people-24: Intended audience**: Linked Data Experts -
-- :other-powerbi: [Power BI](consuming-graphs-in-power-bi/index.md) +- :other-powerbi: [Power BI](consuming-graphs-in-power-bi/index.md) --- Learn how to consume data from your Corporate Memory Knowledge Graph with our Microsoft Power-BI-Connector. -- :material-view-dashboard: [Redash](consuming-graphs-with-redash/index.md) +- :material-view-dashboard: [Redash](consuming-graphs-with-redash/index.md) --- Create Dashboards based on your Knowledge Graphs with the open-source application Redash. -- :eccenca-artefact-dataset-sqlendpoint: [SQL Databases](consuming-graphs-with-sql-databases/index.md) +- :eccenca-artefact-dataset-sqlendpoint: [SQL Databases](consuming-graphs-with-sql-databases/index.md) --- If direct access to the knowledge graph is not sufficient, fragments of the Knowledge Graph may also be pushed into external SQL databases. -- :material-api: [Custom APIs](provide-data-in-any-format-via-a-custom-api/index.md) +- :material-api: [Custom APIs](provide-data-in-any-format-via-a-custom-api/index.md) --- Learn how to provide data via a customized Corporate Memory API in a text format of your choice and how to consume it in your applications. -- :simple-neo4j: [Neo4j](populate-data-to-neo4j/index.md) +- :simple-neo4j: [Neo4j](populate-data-to-neo4j/index.md) --- Learn how to populate graphs to Neo4j. -- :material-apache-kafka: [Apache Kafka](consume-graphs-in-apache-kafka/index.md) +- :material-apache-kafka: [Apache Kafka](consume-graphs-in-apache-kafka/index.md) --- Use a Apache Kafka Producer in order to export parts of your Knowledge Graph as a message stream.
- diff --git a/docs/consume/populate-data-to-neo4j/index.md b/docs/consume/populate-data-to-neo4j/index.md index 8e190ba35..d29c7ae79 100644 --- a/docs/consume/populate-data-to-neo4j/index.md +++ b/docs/consume/populate-data-to-neo4j/index.md @@ -37,9 +37,9 @@ To upload multiple files together as an input: 1. In Corporate Memory, click Projects under **BUILD** in the navigation on the left side of the page. ![Menu > BUILD > Projects](pdtn-menu-projects.png){width="50%"} -2. Click Create at the top of the page.  +2. Click Create at the top of the page. 3. In Create new item window, select Project and click Add. The Create new item of type Project window appears. -4. Fill in the required details such as Title and Description.  Alternatively, import the existing project by clicking Import Project File and selecting the file from your system.  +4. Fill in the required details such as Title and Description.  Alternatively, import the existing project by clicking Import Project File and selecting the file from your system. 5. Click Create. Your project (Northwind) is created. ![Project Northwind](pdtn-project-northwind.png) 6. In your project, click Create Item. @@ -57,7 +57,7 @@ To upload multiple files together as an input: A Neo4j dataset holding a Labeled Property Graph (LPG) representation is one of the outputs of the process. Perform the following steps to create a Neo4j dataset: -1. In your existing project, click Create to create a new item.  +1. In your existing project, click Create to create a new item. 2. In the item category Dataset select Neo4j. ![Create Neo4j dataset](pdtn-create-dataset-neo4j.png) 3. Click Add. @@ -248,7 +248,7 @@ To integrate all the transformations, perform the following steps: 1. Navigate to the project. 2. Click Create -3. In the Create new item window, select Workflow and click Add.  +3. In the Create new item window, select Workflow and click Add. 4. 
In the Create new item of type Transform window, for this tutorial, enter the following: - Label: _Northwind Workflow_ - Click Create @@ -273,4 +273,3 @@ Optionally, you can use the same transformation and workflow to render the resul ![Add Knowledge Graph output](pdtn-add-kg-dataset-into-workflow.png) 2. The results can then be reviewed in the Knowledge Graph module, e.g., explored visually: ![Results in Corporate Memory](pdtn-ontodia-result-view.png) - diff --git a/docs/consume/provide-data-in-any-format-via-a-custom-api/index.md b/docs/consume/provide-data-in-any-format-via-a-custom-api/index.md index b0209425e..4ac61c3d0 100644 --- a/docs/consume/provide-data-in-any-format-via-a-custom-api/index.md +++ b/docs/consume/provide-data-in-any-format-via-a-custom-api/index.md @@ -176,4 +176,3 @@ A non-streaming result set (the SPARQL query) is limited to 1000 elements. If more results are expected *Is Streaming* should be set to true. If *Is Streaming* is set to `true` the Jinja Template has to resolve a `result` variable (without the '`s`'), which is a single query result. The template engine iterates over the results, i.e. the Body template is repeated for each query result. - diff --git a/docs/deploy-and-configure/configuration/access-conditions/index.md b/docs/deploy-and-configure/configuration/access-conditions/index.md index 9029daa16..aebc6e650 100644 --- a/docs/deploy-and-configure/configuration/access-conditions/index.md +++ b/docs/deploy-and-configure/configuration/access-conditions/index.md @@ -10,7 +10,7 @@ tags: Access Conditions specify access rights for users and groups to graphs and actions (1). { .annotate } -1. Graphs identify specific Knowledge Graphs. +1. Graphs identify specific Knowledge Graphs. Actions identify specific parts or components of the platform, such as the query catalog or the data integration system (Build). Access Conditions are managed in a special system graph, so write access to this graph needs to be handled carefully. 
@@ -192,7 +192,6 @@ This screen is split into two main areas: - Second, the list of all access conditions which contributed to the effective access rights. This section allows to see which access conditions matched the principal and which access rights they grant. - ### Command line interface With [cmemc](../../../automate/cmemc-command-line-interface/index.md) you can use an additional command line based interface to manage access conditions. @@ -205,7 +204,6 @@ The important command groups for managing principals and access conditions are: The following session demonstrates how to create a new user, set a password and grant access to certain areas. - ``` shell-session $ cmemc admin acl list No access conditions found. Use the `admin acl create` command to create a new access condition. @@ -314,4 +312,3 @@ The following list, provides grants which work together: - Allowed Actions: **Build - Workspace** (`:Build`) - Allow write graphs: - **All Graphs** (`https://vocab.eccenca.com/auth/AllGraphs`) - diff --git a/docs/deploy-and-configure/configuration/caveats/index.md b/docs/deploy-and-configure/configuration/caveats/index.md index 3656a8340..322f2b9d2 100644 --- a/docs/deploy-and-configure/configuration/caveats/index.md +++ b/docs/deploy-and-configure/configuration/caveats/index.md @@ -63,6 +63,6 @@ For Explore backend (DataPlatform) you have to set this in `application.yaml` or ### Useful Documentation -- [AWS VPN Documentation](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/networking-connecting-services.html) -- [AWS Network load Balancers](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/network-load-balancers.html) -- [GraphDB with load balancers](https://graphdb.ontotext.com/documentation/10.7/aws-deployment.html#setting-up-the-load-balancer) +- [AWS VPN Documentation](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/networking-connecting-services.html) +- [AWS Network load 
Balancers](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/network-load-balancers.html) +- [GraphDB with load balancers](https://graphdb.ontotext.com/documentation/10.7/aws-deployment.html#setting-up-the-load-balancer) diff --git a/docs/deploy-and-configure/configuration/dataintegration/activity-reference/index.md b/docs/deploy-and-configure/configuration/dataintegration/activity-reference/index.md index 7ef90a29d..c78da92d1 100644 --- a/docs/deploy-and-configure/configuration/dataintegration/activity-reference/index.md +++ b/docs/deploy-and-configure/configuration/dataintegration/activity-reference/index.md @@ -16,7 +16,6 @@ The following activities are available for each project. Generates matches between schema paths and datasets based on the schema discovery and profiling information of the datasets. - | Parameter | Type | Description | Example | | ---------------------- | ------------- | ------------------ | -------------------------- | | datasetUri | String | If set, run dataset matching only for this particular dataset. | @@ -25,8 +24,6 @@ The identifier for this plugin is `DatasetMatcher`. It can be found in the package `com.eccenca.di.datamatching`. - - ## Task Activities The following activities are available for different types of tasks. @@ -42,8 +39,6 @@ The identifier for this plugin is `ExecuteRestTask`. It can be found in the package `com.eccenca.di.workflow.operators.rest`. - - ### Dataset #### Dataset profiler @@ -64,8 +59,6 @@ The identifier for this plugin is `DatasetProfiler`. It can be found in the package `com.eccenca.di.profiling`. - - #### SQL endpoint status Shows the SQL endpoint status. @@ -75,8 +68,6 @@ The identifier for this plugin is `SqlEndpointStatus`. It can be found in the package `com.eccenca.di.sql.spark.endpoint.activity`. - - #### Types cache Holds the most frequent types in a dataset. @@ -86,8 +77,6 @@ The identifier for this plugin is `TypesCache`. 
It can be found in the package `org.silkframework.workspace.activity.dataset`. - - ### LinkSpecification #### Active learning @@ -102,8 +91,6 @@ The identifier for this plugin is `ActiveLearning`. It can be found in the package `org.silkframework.learning.active`. - - #### Active learning (find comparison pairs) Suggest comparison pairs for the current linking task. @@ -116,8 +103,6 @@ The identifier for this plugin is `ActiveLearning-ComparisonPairs`. It can be found in the package `org.silkframework.learning.active.comparisons`. - - #### Evaluate linking Evaluates the linking task by generating links. @@ -136,8 +121,6 @@ The identifier for this plugin is `EvaluateLinking`. It can be found in the package `org.silkframework.workspace.activity.linking`. - - #### Execute linking Executes the linking task using the configured execution. @@ -147,8 +130,6 @@ The identifier for this plugin is `ExecuteLinking`. It can be found in the package `org.silkframework.workspace.activity.linking`. - - #### Linking paths cache Holds the most frequent paths for the selected entities. @@ -158,8 +139,6 @@ The identifier for this plugin is `LinkingPathsCache`. It can be found in the package `org.silkframework.workspace.activity.linking`. - - #### Reference entities cache For each reference link, the reference entities cache holds all values of the linked entities. @@ -169,8 +148,6 @@ The identifier for this plugin is `ReferenceEntitiesCache`. It can be found in the package `org.silkframework.workspace.activity.linking`. - - ### Scheduler #### Activate @@ -182,8 +159,6 @@ The identifier for this plugin is `ExecuteScheduler`. It can be found in the package `com.eccenca.di.scheduler`. - - ### ScriptTask #### Execute Script @@ -195,8 +170,6 @@ The identifier for this plugin is `ExecuteScript`. It can be found in the package `com.eccenca.di.scripting.scala`. - - ### TransformSpecification #### Execute transform @@ -211,8 +184,6 @@ The identifier for this plugin is `ExecuteTransform`. 
It can be found in the package `org.silkframework.workspace.activity.transform`. - - #### Transform paths cache Holds the most frequent paths for the selected entities. @@ -222,8 +193,6 @@ The identifier for this plugin is `TransformPathsCache`. It can be found in the package `org.silkframework.workspace.activity.transform`. - - #### Target vocabulary cache Holds the target vocabularies @@ -233,8 +202,6 @@ The identifier for this plugin is `VocabularyCache`. It can be found in the package `org.silkframework.workspace.activity.transform`. - - ### Workflow #### Execute locally @@ -246,8 +213,6 @@ The identifier for this plugin is `ExecuteLocalWorkflow`. It can be found in the package `org.silkframework.workspace.activity.workflow`. - - ### WorkflowExecution #### Generate Spark assembly @@ -264,8 +229,6 @@ The identifier for this plugin is `DeploySparkWorkflow`. It can be found in the package `com.eccenca.di.spark`. - - #### Default execution Executes a workflow with the executor defined in the configuration @@ -275,8 +238,6 @@ The identifier for this plugin is `ExecuteDefaultWorkflow`. It can be found in the package `com.eccenca.di.spark`. - - #### Execute operator Executes a workflow on with an executor that uses Apache Spark. Depending on the Spark configuration it can still run on a single local machine or on a cluster. @@ -289,8 +250,6 @@ The identifier for this plugin is `ExecuteSparkOperator`. It can be found in the package `com.eccenca.di.spark`. - - #### Execute on Spark Executes a workflow on with an executor that uses Apache Spark. Depending on the Spark configuration it can still run on a single local machine or on a cluster. @@ -300,8 +259,6 @@ The identifier for this plugin is `ExecuteSparkWorkflow`. It can be found in the package `com.eccenca.di.spark`. - - #### Execute with payload Executes a workflow with custom payload. @@ -316,8 +273,6 @@ The identifier for this plugin is `ExecuteWorkflowWithPayload`. 
It can be found in the package `org.silkframework.workbench.workflow`. - - #### Generate view Generate and share a view on a workflow executed by the Spark executor. Executes a workflow on Spark and generates a SparkSQL temporary table instead of serializing the result. The table can be accessed via JDBC @@ -330,6 +285,3 @@ Generate and share a view on a workflow executed by the Spark executor. Executes The identifier for this plugin is `GenerateSparkView`. It can be found in the package `com.eccenca.di.sql.spark.virtual`. - - - diff --git a/docs/deploy-and-configure/configuration/dataintegration/index.md b/docs/deploy-and-configure/configuration/dataintegration/index.md index 7eb075fb6..3c8fc0c2a 100644 --- a/docs/deploy-and-configure/configuration/dataintegration/index.md +++ b/docs/deploy-and-configure/configuration/dataintegration/index.md @@ -354,6 +354,7 @@ workspace.repository.projectS3 = { # /path/to/my-workspace/ } ``` + For this S3 plugin make sure the account has at least these permissions attached: ``` json @@ -914,7 +915,7 @@ Within Build (DataIntegration) the SqlEndpoint can be used as a source or sink l Any JDBC or ODBC client can connect to a JDBC endpoint provided by an SqlEndpoint dataset. 
SqlEndpoint uses the same query processing as Hive, therefore the requirements for the client are: - A JDBC driver compatible with *Hive 1.2.1* (platform independent driver org.apache.hive.jdbc.HiveDriver is needed) or - - Hive 1.2.1 is [ODPi](https://github.com/odpi/specs/blob/master/ODPi-Runtime.md) runtime compliant + - Hive 1.2.1 is [ODPi](https://github.com/odpi/specs/blob/master/ODPi-Runtime.md) runtime compliant - A JDBC driver compatible with *Spark 2.3.3* - A Hive ODBC driver (ODBC driver for the client architecture and operating system needed) @@ -1036,10 +1037,10 @@ com.eccenca.di.mappingCreatorEnabled = true The [Mapping Creator can optionally use LLM](../../../build/mapping-creator/index.md#smart-suggestions-with-ai-support) to automatically generate class and property mappings. Use the following configuration section as a blueprint to set up your OpenAI-compatible endpoint, providing: -- the API key, -- a model, -- reasoning level, -- and (optionally) benchmarking outputs. +- the API key, +- a model, +- reasoning level, +- and (optionally) benchmarking outputs. 
```bash linenums="1" ################################################# diff --git a/docs/deploy-and-configure/configuration/docker-orchestration/index.md b/docs/deploy-and-configure/configuration/docker-orchestration/index.md index 98c8b0e12..04f4efe52 100644 --- a/docs/deploy-and-configure/configuration/docker-orchestration/index.md +++ b/docs/deploy-and-configure/configuration/docker-orchestration/index.md @@ -18,7 +18,7 @@ The environment files are supplied in the CONFIGFILE environment variable to th For example, in [Scenario: Single Node Cloud Installation](../../installation/scenario-single-node-cloud-installation/index.md) we have created a `prod.env` environment file and created the Corporate Memory instance using `prod.env` configuration: ``` shell-session -$ CONFIGFILE=environments/prod.env make clean-pull-start-bootstrap +CONFIGFILE=environments/prod.env make clean-pull-start-bootstrap ``` When you run `make clean-pull-start-bootstrap` target, the Makefile will evaluate and export the environment variables from the `environments/default.env`, your `${CONFIGFILE}` or `environments/config.env` and `environments/scripted-env.mk`: @@ -43,18 +43,18 @@ To configure the orchestration according to your requirements, you need simply t For example, to replicate the minimum configuration from `config.env`, you can do the following: ``` shell-session -$ echo "create empty environments/prod.env file" -$ touch environments/prod.env -$ echo "inject necessary variables into the prod.env" -$ echo "CMEM_SERVICE_ACCOUNT_CLIENT_SECRET=c8c12828-000c-467b-9b6d-2d6b5e16df4a" >> environments/prod.env -$ echo "STARDOG_PASSWORD=admin" >> environments/prod.env -$ echo "TRUSTSTOREPASS=Aimeik5Ocho5riuC" >> environments/prod.env +echo "create empty environments/prod.env file" +touch environments/prod.env +echo "inject necessary variables into the prod.env" +echo "CMEM_SERVICE_ACCOUNT_CLIENT_SECRET=c8c12828-000c-467b-9b6d-2d6b5e16df4a" >> environments/prod.env +echo 
"STARDOG_PASSWORD=admin" >> environments/prod.env +echo "TRUSTSTOREPASS=Aimeik5Ocho5riuC" >> environments/prod.env ``` This configuration will be sufficient to run the orchestration locally as described in [Scenario: Local Installation](../../installation/scenario-local-installation/index.md): ``` shell-session -$ CONFIGFILE=environments/prod.env make clean-pull-start-bootstrap +CONFIGFILE=environments/prod.env make clean-pull-start-bootstrap ``` ## Available Configuration Variables @@ -81,7 +81,7 @@ All available configuration environment variables are listed in `environments/de | APACHE_CONFIG | default.conf | Apache2 virtual host configuration | | SSLCONF | ssl.default.conf | Apache2 virtual host configuration for SSL setup | | HTTP_PORT | 80 | APACHE_HTTP_PORT is used as a standard port 80 in SSL setup | -| LETSENCRYPT_MAIL | administration@eccenca.com | email to be used when requesting letsencrypt certificates | +| LETSENCRYPT_MAIL | | email to be used when requesting letsencrypt certificates | | DATAINTEGRATION_BASE_FILE | docker-compose.dataintegration-base.yml | `docker compose` extension file for Build (DataIntegration), see SSL configuration section below for an example | | TRUSTSTOREPASS | (empty) | Truststore password, see self-signed certificates configuration section below for an example | diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-full.md index b44659ab5..831a53a4a 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-full.md @@ -5,7 +5,6 @@ tags: ## Deployment options for explore container - ***Property: deploy.apiPrefix*** API prefix for former dataplatform endpoints i.e. 
/dataplatform @@ -30,10 +29,8 @@ URI where to redirect to when the user logs out ### Options for additional prometheus metrics endpoint - ***Property: deploy.additional-prometheus-endpoint.enabled*** - | Category | Value | |--- | ---: | | Default | false | @@ -43,7 +40,6 @@ URI where to redirect to when the user logs out ***Property: deploy.additional-prometheus-endpoint.port*** - | Category | Value | |--- | ---: | | Default | 9091 | @@ -53,7 +49,6 @@ URI where to redirect to when the user logs out ***Property: deploy.additional-prometheus-endpoint.context*** - | Category | Value | |--- | ---: | | Default | /metrics | @@ -63,7 +58,7 @@ URI where to redirect to when the user logs out ## License -By default, DataPlatform is subject to the eccenca free Personal, Evaluation and Development License Agreement (PEDAL), a license intended for non-commercial usage. When your delivery includes a dedicated license file, you have to configure DataPlatform to enable your license. +By default, DataPlatform is subject to the eccenca free Personal, Evaluation and Development License Agreement (PEDAL), a license intended for non-commercial usage. When your delivery includes a dedicated license file, you have to configure DataPlatform to enable your license. To change the default configuration, you have several options. If the properties under license are not provided the default license included (PEDAL) is used. In case a dedicated license file is used, different configuration options can overwrite each other. The license is read in the following sequence: @@ -73,12 +68,10 @@ In case a dedicated license file is used, different configuration options can ov 3. license.asc file in the same folder, where the application is started from (in Standalone Mode) 4. Fallback to eccenca free Personal, Evaluation and Development License Agreement (PEDAL) - ***Property: license.key*** Use this property to specify the license key as a YAML multiline string value of the license.key property. 
- ```yaml key: | -----BEGIN PGP MESSAGE----- @@ -87,7 +80,6 @@ key: | -----END PGP MESSAGE----- ``` - | Category | Value | |--- | ---: | | Default | *none* | @@ -112,7 +104,6 @@ Use this property to specify the location of the license file This section provides general configuration settings. - ### Configuration of Caching DataPlatform provides caching support which is enabled by default with an in-memory Caffeine cache. @@ -124,7 +115,6 @@ which can be further configured under the custom properties "spring.cache.infini To disable caching, set the type to NONE (not recommended). - | Category | Value | |--- | ---: | | Default | INFINISPAN | @@ -134,7 +124,6 @@ To disable caching, set the type to NONE (not recommended). ***Property: spring.cache.infinispan.mode*** - | Category | Value | |--- | ---: | | Default | LOCAL | @@ -144,7 +133,6 @@ To disable caching, set the type to NONE (not recommended). ***Property: spring.mvc.pathmatch.matching-strategy*** - | Category | Value | |--- | ---: | | Default | ant_path_matcher | @@ -154,7 +142,6 @@ To disable caching, set the type to NONE (not recommended). ***Property: spring.thymeleaf.prefix*** - | Category | Value | |--- | ---: | | Default | classpath:/public/ | @@ -164,7 +151,6 @@ To disable caching, set the type to NONE (not recommended). ***Property: spring.thymeleaf.mode*** - | Category | Value | |--- | ---: | | Default | HTML | @@ -178,7 +164,6 @@ DataSource configuration for the chat memory persistence using HSQLDB. In production/Docker environments, data is stored in /var/lib/explore/chat-memory. In development environments, this can be overridden in application-dev.yml. 
- ***Property: spring.datasource.driver-class-name*** JDBC driver class for HSQLDB @@ -225,19 +210,16 @@ Database password for HSQLDB (empty for embedded mode) ### Configuration of Servlet Container - Multipart upload limits config You may need to set the following parameter values to 2048MB for implementations that cannot handle large requests - ***Property: spring.servlet.multipart.max-file-size*** Use this property to define the maximum size of an uploaded file in number of bytes. Values can use the suffixed "MB" or "KB" (e.g. '1024MB'). **Note:** If DataPlatform is deployed in a Servlet container, make sure to also configure support for large file sizes. - | Category | Value | |--- | ---: | | Default | 4096MB | @@ -269,7 +251,6 @@ Temporary storage used for multipart upload. This defaults to system property ja ***Property: spring.jackson.default-property-inclusion*** - | Category | Value | |--- | ---: | | Default | non_null | @@ -279,7 +260,6 @@ Temporary storage used for multipart upload. This defaults to system property ja ***Property: spring.ai.retry.max-attempts*** - | Category | Value | |--- | ---: | | Default | 2 | @@ -291,7 +271,6 @@ Temporary storage used for multipart upload. 
This defaults to system property ja Use this property to enable or disable the AI capabilities of Explore - | Category | Value | |--- | ---: | | Default | false | @@ -301,7 +280,6 @@ Use this property to enable or disable the AI capabilities of Explore ***Property: spring.ai.mcp.server.name*** - | Category | Value | |--- | ---: | | Default | cmem-mcp-server | @@ -311,7 +289,6 @@ Use this property to enable or disable the AI capabilities of Explore ***Property: spring.ai.mcp.server.version*** - | Category | Value | |--- | ---: | | Default | 1.0.0 | @@ -321,7 +298,6 @@ Use this property to enable or disable the AI capabilities of Explore ***Property: spring.ai.mcp.server.type*** - | Category | Value | |--- | ---: | | Default | SYNC | @@ -331,7 +307,6 @@ Use this property to enable or disable the AI capabilities of Explore ***Property: spring.ai.mcp.server.instructions*** - | Category | Value | |--- | ---: | | Default | This server provides access to corporate memory | @@ -341,7 +316,6 @@ Use this property to enable or disable the AI capabilities of Explore ***Property: spring.ai.mcp.server.capabilities.tool*** - | Category | Value | |--- | ---: | | Default | true | @@ -351,7 +325,6 @@ Use this property to enable or disable the AI capabilities of Explore ***Property: spring.ai.mcp.server.capabilities.resource*** - | Category | Value | |--- | ---: | | Default | true | @@ -361,7 +334,6 @@ Use this property to enable or disable the AI capabilities of Explore ***Property: spring.ai.mcp.server.capabilities.prompt*** - | Category | Value | |--- | ---: | | Default | true | @@ -371,7 +343,6 @@ Use this property to enable or disable the AI capabilities of Explore ***Property: spring.ai.mcp.server.capabilities.completion*** - | Category | Value | |--- | ---: | | Default | true | @@ -381,7 +352,6 @@ Use this property to enable or disable the AI capabilities of Explore ***Property: spring.ai.mcp.server.enabled*** - | Category | Value | |--- | ---: | | Default | true | @@ -391,7 
+361,6 @@ Use this property to enable or disable the AI capabilities of Explore ***Property: spring.ai.mcp.server.sse-message-endpoint*** - | Category | Value | |--- | ---: | | Default | /dataplatform/mcp/messages | @@ -401,7 +370,6 @@ Use this property to enable or disable the AI capabilities of Explore ***Property: spring.ai.mcp.server.sse-endpoint*** - | Category | Value | |--- | ---: | | Default | /dataplatform/mcp/sse | @@ -411,7 +379,6 @@ Use this property to enable or disable the AI capabilities of Explore ***Property: spring.ai.mcp.server.stdio*** - | Category | Value | |--- | ---: | | Default | false | @@ -423,7 +390,6 @@ Use this property to enable or disable the AI capabilities of Explore This configures the possible chat models for interacting with the companion. - | Category | Value | |--- | ---: | | Default | openai | @@ -433,7 +399,6 @@ This configures the possible chat models for interacting with the companion. ***Property: spring.ai.model.embedding*** - | Category | Value | |--- | ---: | | Default | openai | @@ -443,7 +408,6 @@ This configures the possible chat models for interacting with the companion. ***Property: spring.ai.anthropic.api-key*** - | Category | Value | |--- | ---: | | Default | | @@ -453,7 +417,6 @@ This configures the possible chat models for interacting with the companion. ***Property: spring.ai.anthropic.chat.options.max-tokens*** - | Category | Value | |--- | ---: | | Default | 10000 | @@ -463,7 +426,6 @@ This configures the possible chat models for interacting with the companion. ***Property: spring.ai.openai.api-key*** - | Category | Value | |--- | ---: | | Default | | @@ -473,7 +435,6 @@ This configures the possible chat models for interacting with the companion. ***Property: spring.ai.openai.chat.options.response-format.type*** - | Category | Value | |--- | ---: | | Default | json_schema | @@ -483,7 +444,6 @@ This configures the possible chat models for interacting with the companion. 
***Property: spring.ai.openai.chat.options.response-format.schema*** - | Category | Value | |--- | ---: | | Default | { \"name\": \"string\", \"schema\": \"string\" } @@ -494,7 +454,6 @@ This configures the possible chat models for interacting with the companion. ***Property: spring.ai.azure.openai.endpoint*** - | Category | Value | |--- | ---: | | Default | | @@ -504,7 +463,6 @@ This configures the possible chat models for interacting with the companion. ***Property: spring.ai.azure.openai.chat.options.response-format.type*** - | Category | Value | |--- | ---: | | Default | json_schema | @@ -514,7 +472,6 @@ This configures the possible chat models for interacting with the companion. ***Property: spring.ai.azure.openai.chat.options.response-format.schema*** - | Category | Value | |--- | ---: | | Default | { \"name\": \"string\", \"schema\": \"string\" } @@ -525,7 +482,6 @@ This configures the possible chat models for interacting with the companion. ***Property: spring.ai.mistral-ai.api-key*** - | Category | Value | |--- | ---: | | Default | | @@ -535,7 +491,6 @@ This configures the possible chat models for interacting with the companion. ***Property: management.info.env.enabled*** - | Category | Value | |--- | ---: | | Default | true | @@ -545,7 +500,6 @@ This configures the possible chat models for interacting with the companion. ***Property: management.endpoints.web.base-path*** - | Category | Value | |--- | ---: | | Default | /dataplatform/actuator | @@ -555,7 +509,6 @@ This configures the possible chat models for interacting with the companion. ***Property: management.endpoints.web.exposure.include*** - | Category | Value | |--- | ---: | | Default | * | @@ -565,7 +518,6 @@ This configures the possible chat models for interacting with the companion. ***Property: management.endpoints.enabled-by-default*** - | Category | Value | |--- | ---: | | Default | false | @@ -575,7 +527,6 @@ This configures the possible chat models for interacting with the companion. 
***Property: management.endpoint.health.enabled*** - | Category | Value | |--- | ---: | | Default | true | @@ -585,7 +536,6 @@ This configures the possible chat models for interacting with the companion. ***Property: management.endpoint.health.show-details*** - | Category | Value | |--- | ---: | | Default | when_authorized | @@ -595,7 +545,6 @@ This configures the possible chat models for interacting with the companion. ***Property: management.endpoint.info.enabled*** - | Category | Value | |--- | ---: | | Default | true | @@ -605,7 +554,6 @@ This configures the possible chat models for interacting with the companion. ***Property: management.health.diskspace.enabled*** - | Category | Value | |--- | ---: | | Default | false | @@ -615,7 +563,6 @@ This configures the possible chat models for interacting with the companion. ***Property: management.health.livenessstate.enabled*** - | Category | Value | |--- | ---: | | Default | true | @@ -625,7 +572,6 @@ This configures the possible chat models for interacting with the companion. ***Property: management.health.readinessstate.enabled*** - | Category | Value | |--- | ---: | | Default | true | @@ -635,7 +581,6 @@ This configures the possible chat models for interacting with the companion. ***Property: management.health.sparql.enabled*** - | Category | Value | |--- | ---: | | Default | true | @@ -645,7 +590,6 @@ This configures the possible chat models for interacting with the companion. ***Property: management.health.sparql.fixedDelayInMilliseconds*** - | Category | Value | |--- | ---: | | Default | 5000 | @@ -655,7 +599,6 @@ This configures the possible chat models for interacting with the companion. ***Property: management.health.sparql.timeoutInMilliseconds*** - | Category | Value | |--- | ---: | | Default | 5000 | @@ -665,7 +608,6 @@ This configures the possible chat models for interacting with the companion. 
***Property: management.influx.metrics.export.enabled*** - | Category | Value | |--- | ---: | | Default | false | @@ -706,7 +648,6 @@ springdoc: enabled: true ``` - ***Property: springdoc.api-docs.enabled*** Use this property to enable and expose endpoint that provide the OpenAPI compliant specification of the DataPlatform APIs. The following endpoints will become available when this option is set to true: @@ -715,7 +656,6 @@ Use this property to enable and expose endpoint that provide the OpenAPI complia - /v3/api-docs.yaml - /v3/api-docs/swagger-config - | Category | Value | |--- | ---: | | Default | false | @@ -729,7 +669,6 @@ Use this property to enable and expose a Swagger UI browser interface that can b - /swagger-ui.html - | Category | Value | |--- | ---: | | Default | false | @@ -751,10 +690,9 @@ http: - https://example.com ``` - ***Property: http.cors.allowedOrigins*** -Use this property to define the list of allowed origins. The values must be either specific origins, e.g. http://example.org, or * for all origins. +Use this property to define the list of allowed origins. The values must be either specific origins, e.g. , or * for all origins. | Category | Value | |--- | ---: | @@ -822,7 +760,6 @@ Use this property to define how long in seconds the response from a pre-flight r Java 11 HTTP client settings for HTTP access to the backend store. - ***Property: httpclient.connectionPoolSize*** The maximum number of connections to keep in the HTTP/1.1 keep alive cache. A value of 0 means that the cache is unbounded @@ -863,7 +800,6 @@ authorization: adminGroup: admins ``` - | Category | Value | |--- | ---: | | Default | true | @@ -913,10 +849,9 @@ Use this property to configure the group that gets root access if enabled (see s Use this property to configure the URI of the public user group (see section Public access). **Note:** If you change this property, you also need to change existing URI descriptions and existing access conditions. 
- | Category | Value | |--- | ---: | -| Default | https://vocab.eccenca.com/auth/PublicGroup | +| Default | | | Required | false | | Valid values | string | | Environment | AUTHORIZATION_ABOX_PUBLICGROUP | @@ -926,10 +861,9 @@ Use this property to configure the URI of the public user group (see section Pub Use this property to configure the URI of the public user (see section Public access). **Note:** If you change this property, you also need to change existing URI descriptions and existing access conditions. - | Category | Value | |--- | ---: | -| Default | https://vocab.eccenca.com/auth/AnonymousUser | +| Default | | | Required | false | | Valid values | string | | Environment | AUTHORIZATION_ABOX_ANONYMOUSUSER | @@ -938,13 +872,11 @@ Use this property to configure the URI of the public user (see section Public ac **IMPORTANT:** The following properties are deprecated and have no function anymore! - ***Property: authorization.abox.accessConditions.url*** **DEPRECATED** Use this property to set the URL of the access conditions model file. This can be either a remote (http://...) or a local (file:...) .rdf file. Refer to section Access conditions for more information on the access conditions model. - | Category | Value | |--- | ---: | | Default | *none* | @@ -958,10 +890,9 @@ Use this property to set the URL of the access conditions model file. This can b Use this property to set the graph containing the access conditions model. **Note:** If you change this property, you also need to change the corresponding shape definitions for access conditions (more precisely, the UI SPARQL queries). - | Category | Value | |--- | ---: | -| Default | https://ns.eccenca.com/data/ac/ | +| Default | | | Required | false | | Valid values | string | | Conflicts with | url | @@ -973,7 +904,7 @@ SPARQL endpoints declare how DataPlatform connects to a SPARQL-capable store or With the default configuration, DataPlatform uses an in-memory database. 
This means, that no persistent storage is available, unless a store supporting data persistence is configured. -The following example showcases a setup in which for each Resource all rdfs:label, Literals with language es, then en and in the end those without a language are evaluated. +The following example showcases a setup in which for each Resource all rdfs:label, Literals with language es, then en and in the end those without a language are evaluated. If nothing matches here, skos:prefLabel is examined in the same way ```yaml @@ -989,14 +920,13 @@ proxy: - "" ``` - ***Property: proxy.defaultBaseIri*** -Base IRI for this Corporate Memory instance. If not set falls back to environment variable DEPLOY_BASE_URL, further fallback to https://fallback.eccenca.com/ +Base IRI for this Corporate Memory instance. If not set falls back to environment variable DEPLOY_BASE_URL, further fallback to | Category | Value | |--- | ---: | -| Default | https://fallback.eccenca.com/ | +| Default | | | Required | false | | Valid values | URI | | Environment | PROXY_DEFAULTBASEIRI | @@ -1006,7 +936,6 @@ Base IRI for this Corporate Memory instance. If not set falls back to environmen Use this property to specify which RDF properties should be used to provide label values when matching IRIs against a search term during rewriting SELECT-queries. **Note:** This configuration property affects modification of SELECT-queries for search triggered by the search-string query parameter. 
Results of SELECT-queries when the resolveLabels property is set to LABELS - | Category | Value | |--- | ---: | | Default | [http://www.w3.org/2004/02/skos/core#prefLabel, http://www.w3.org/2000/01/rdf-schema#label, http://purl.org/dc/terms/title, http://www.w3.org/ns/shacl#name] | @@ -1019,7 +948,6 @@ Use this property to specify which RDF properties should be used to provide labe Use this property to specify which RDF properties should be used to provide description values when matching IRIs against a search term during rewriting SELECT-queries. **Note:** This configuration property affects modification of SELECT-queries for search triggered by the search-string query parameter. Results of SELECT-queries when the resolveLabels property is set to LABELS - | Category | Value | |--- | ---: | | Default | [http://purl.org/dc/terms/description, http://www.w3.org/2000/01/rdf-schema#comment] | @@ -1029,11 +957,10 @@ Use this property to specify which RDF properties should be used to provide desc ***Property: proxy.languagePreferences*** -Specifies base language preferences for this instance. +Specifies base language preferences for this instance. **Note:** This configuration property affects results of SELECT-queries when the resolveLabels property is set to LABELS. - | Category | Value | |--- | ---: | | Default | [en, , de, fr] | @@ -1058,7 +985,6 @@ The Concise Boundary Description is used for viewing and editing resoures. By default up to a max of 5 Blank nodes are traversed for calculation. Increasing the max fetch will support deeper constructs, but will also add to loading time. - | Category | Value | |--- | ---: | | Default | 5 | @@ -1068,10 +994,9 @@ Increasing the max fetch will support deeper constructs, but will also add to lo ***Property: proxy.maxCBDStatements*** -The max amount of statements which the Concise Bound Description can contain. +The max amount of statements which the Concise Bound Description can contain. 
(S)CBDs surpassing this will not load but return an error - | Category | Value | |--- | ---: | | Default | 1000000 | @@ -1087,7 +1012,6 @@ returned per `shacl:PropertyShape`. The default needs to be larger than the Data for 'propertyLimit', which is up to 25. Changing this value allows custom endpoints to fetch more data. Increasing this value will increase response time - | Category | Value | |--- | ---: | | Default | 26 | @@ -1155,10 +1079,10 @@ Maximum amount of memory entries for shacl batch validation results can take up. Value Fetch Strategy Determines how the Knowledge Graph is walked for values for specific resources. Used for resolving titles & comments and loading shaped resources. + - RESOURCE_IN_VALUES uses a SPARQL `VALUES (?resource ) { (:resource1)(:resource2)}` - FILTER_ONLY Uses SPARQL uses a SPARQL `FILTER (?resource in (:resource1, :resource2))` - | Category | Value | |--- | ---: | | Default | RESOURCE_IN_VALUES | @@ -1215,7 +1139,6 @@ gitSync: scheduledPullCron: "0 */5 * * * *" ``` - ***Property: gitSync.enabled*** Activates / Deactivates git graph sync feature @@ -1299,7 +1222,7 @@ The committer email which appears in the commit message on system commits | Category | Value | |--- | ---: | -| Default | info@eccenca.com | +| Default | | | Required | false | | Valid values | string | | Environment | GITSYNC_COMMITTEREMAIL | @@ -1310,9 +1233,9 @@ Schedules Pull Frequency - Configured git repositories for sync are pulled regul | Category | Value | |--- | ---: | -| Default | 0 */30 * * * * | +| Default | 0 */30* ** * | | Required | false | -| Valid values | Cron setting according to https://docs.spring.io/spring-framework/docs/current/reference/html/integration.html#scheduling-cron-expression | +| Valid values | Cron setting according to | | Environment | GITSYNC_SCHEDULEDPULLCRON | ***Property: semspect.enabled*** @@ -1351,11 +1274,9 @@ logging: file: /var/logs/dataplatform.log ``` - Use these properties to specify where you want to store your 
logging file. Specifying a file leads to both, logging to standard output and the file. File output creates an auto-rotating file with 10 MB file size each. - ***Property: logging.file.name*** Log file name (for instance, `myapp.log`). Names can be an exact location or relative to the current directory. @@ -1389,7 +1310,6 @@ logging: configuration: ELDS_HOME/etc/dataplatform/logback.xml ``` - | Category | Value | |--- | ---: | | Default | *none* | @@ -1399,7 +1319,6 @@ logging: ***Property: logging.level.audit*** - | Category | Value | |--- | ---: | | Default | INFO | @@ -1409,7 +1328,6 @@ logging: ***Property: logging.level.com.eccenca.elds.backend*** - | Category | Value | |--- | ---: | | Default | INFO | @@ -1419,7 +1337,6 @@ logging: ***Property: logging.level.org.springframework*** - | Category | Value | |--- | ---: | | Default | WARN | @@ -1429,7 +1346,6 @@ logging: ***Property: logging.level.com.eccenca.elds.backend.webapp.web.filter.SimpleCorsFilter*** - | Category | Value | |--- | ---: | | Default | WARN | @@ -1439,7 +1355,6 @@ logging: ***Property: logging.level.com.eccenca.elds.backend.webapp.web.GlobalControllerExceptionHandler*** - | Category | Value | |--- | ---: | | Default | TRACE | @@ -1449,7 +1364,6 @@ logging: ***Property: logging.level.com.eccenca.elds.backend.cache.logging*** - | Category | Value | |--- | ---: | | Default | WARN | @@ -1459,7 +1373,6 @@ logging: ***Property: logging.level.org.hibernate.search.backend.lucene.impl*** - | Category | Value | |--- | ---: | | Default | ERROR | @@ -1479,13 +1392,11 @@ auditTrail: - "aksw.org" ``` - ***Property: audit-trail.enabled*** Use this property to enable logging of read and write access to every graph access. If auditTrail.auditedGraphs is specified, only those graphs are logged. **Note:** If audit trail logging is enabled, RDF upload over the Graph Store Protocol interface is limited to triple formats. Any attempt to upload a quad format results in an HTTP 415 error. 
- | Category | Value | |--- | ---: | | Default | false | @@ -1508,7 +1419,6 @@ Limits the size of the query response ***Property: sparql.query.limit*** - | Category | Value | |--- | ---: | | Default | 100000 | @@ -1533,7 +1443,6 @@ server: contextPath: /dataplatform ``` - ***Property: server.port*** Use this property to set the TCP port where the embedded server is available. @@ -1547,7 +1456,6 @@ Use this property to set the TCP port where the embedded server is available. ***Property: server.error.include-stacktrace*** - | Category | Value | |--- | ---: | | Default | NEVER | @@ -1570,7 +1478,6 @@ Tomcat servlet settings ***Property: server.servlet.session.cookie.same-site*** - | Category | Value | |--- | ---: | | Default | Lax | @@ -1594,7 +1501,6 @@ server: client-auth: NEED ``` - ***Property: server.ssl.key-store*** Use this property to define the path to the KeyStore used for one-way or two-way SSL authentication. @@ -1605,7 +1511,6 @@ In case of two-way authentication, a TrustStore must also be configured. This co JAVA_TOOL_OPTIONS=-Djavax.net.ssl.trustStore=path_to_trust_store.jks -Djavax.net.ssl.trustStorePassword=trust_store_password (ADD TO EXISTING JAVA_TOOL_OPTIONS) ``` - | Category | Value | |--- | ---: | | Default | *none* | @@ -1630,7 +1535,6 @@ Use this property to define the client identification policy. If WANT is set, client identification is optional. If NEED is set, client identification is mandatory, so unauthenticated clients are refused. - | Category | Value | |--- | ---: | | Default | *none* | @@ -1656,7 +1560,6 @@ Configuration recommendation: **Note:** This configuration recommendation provides settings for headers most commonly used by proxies. Make sure to add all three properties in order to enforce HTTPS. - ***Property: server.tomcat.remoteIpHeader*** Use this property to set the request header which is required to identify the originating IP address of the client connecting to DataPlatform through an HTTP proxy. 
@@ -1681,7 +1584,6 @@ Use this property to set the request header which is required to identify the or ***Property: server.tomcat.max-swallow-size*** - | Category | Value | |--- | ---: | | Default | -1 | @@ -1728,16 +1630,14 @@ Limits how many background query requests can be run in parallel. This applies t ## Asynchronous file uploads -Files can be asynchronously uploaded to the backend store in multiple steps which include an analysis of the uploaded file. +Files can be asynchronously uploaded to the backend store in multiple steps which include an analysis of the uploaded file. Please s. API documentation under /api/upload/ for further information. - ***Property: files.maxStorageSingleFileSizeMb*** Maximum size of one stored file (as uploaded i.e. can also be compressed size) Value in Mb - | Category | Value | |--- | ---: | | Default | 3000 | @@ -1750,7 +1650,6 @@ Value in Mb Minimum storage space left on temp device of DataPlatform for file uploads Value in Mb - | Category | Value | |--- | ---: | | Default | 3000 | @@ -1763,7 +1662,6 @@ Value in Mb Cron setting for housekeeping / maintenance job Stored files and saved analysis will be deleted if older than maintenanceExpirationDuration - | Category | Value | |--- | ---: | | Default | P1D | @@ -1829,4 +1727,3 @@ Query timeout as duration which is active if no timeout in request has been set | Required | false | | Valid values | ISO 8601 duration format | | Environment | STORE_QUERYTIMEOUTGENERAL | - diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-graphdb-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-graphdb-full.md index 4057e5ad3..7016879af 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-graphdb-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-graphdb-full.md @@ -27,7 +27,6 @@ store: createRepositoryOnStartup: true ``` - ***Property: store.type*** The type of the 
store must be set to "graphdb" @@ -41,7 +40,6 @@ The type of the store must be set to "graphdb" ### Specific settings for GraphDB - ***Property: store.graphdb.host*** The host of the GraphDB database @@ -147,7 +145,7 @@ The iri of the lucene index to be used for searches. If the default index is use | Category | Value | |--- | ---: | -| Default | http://www.ontotext.com/connectors/lucene/instance#cmembaseindex | +| Default | | | Required | false | | Valid values | Valid URI of lucene index | | Environment | STORE_GRAPHDB_GDBBASEINDEX | @@ -173,4 +171,3 @@ Maximum amount of quads of change tracking result which will be loaded in memory | Required | false | | Valid values | int | | Environment | STORE_GRAPHDB_GRAPHDBCHANGETRACKINGMAXQUADMEMORY | - diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-http-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-http-full.md index 921f665ea..419f03ca1 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-http-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-http-full.md @@ -22,7 +22,6 @@ store: password: "password" ``` - ***Property: store.type*** The type of the store must be set to "http" @@ -36,7 +35,6 @@ The type of the store must be set to "http" ***Property: store.authorization*** - | Category | Value | |--- | ---: | | Default | REWRITE_FROM | @@ -52,7 +50,7 @@ Use this property to configure the endpoint to which SPARQL 1.1 queries are sent | Category | Value | |--- | ---: | -| Default | http://localhost:7200/repositories/cmem | +| Default | | | Required | true | | Valid values | string | | Environment | STORE_HTTP_QUERY_ENDPOINT_URL | @@ -63,7 +61,7 @@ Use this property to configure the endpoint to which SPARQL 1.1 updates are sent | Category | Value | |--- | ---: | -| Default | http://localhost:7200/repositories/cmem/statements | +| Default | | | Required | true | | Valid values | 
string | | Environment | STORE_HTTP_UPDATE_ENDPOINT_URL | @@ -74,7 +72,7 @@ Use this property to configure the endpoint to SPARQL 1.1 Graph Store Protocol r | Category | Value | |--- | ---: | -| Default | http://localhost:7200/repositories/cmem/rdf-graphs/service | +| Default | | | Required | false | | Valid values | string | | Environment | STORE_HTTP_GRAPH_STORE_ENDPOINT_URL | @@ -122,4 +120,3 @@ Defines how the raw list of graphs is retrieved, and therefore which graphs are | Required | false | | Valid values | Valid SPARQL query with bound variable "g" | | Environment | STORE_HTTP_GRAPHLISTQUERY | - diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-inmemory-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-inmemory-full.md index 7b0d1b943..29ff13f1e 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-inmemory-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-inmemory-full.md @@ -5,9 +5,9 @@ tags: ## Configuration for connecting to internal memory backend -You can configure a in-memory SPARQL backend. Based on Jena Models, in-memory backends do not provide persistent storage. +You can configure a in-memory SPARQL backend. Based on Jena Models, in-memory backends do not provide persistent storage. Hence, shutting down a DataPlatform configured with an in-memory backend deletes your data and therefore you should use it only for testing purposes. - + Configuration example: This example configures an in-memory store which initializes with the triples contained in the given file. 
@@ -21,7 +21,6 @@ store: - "/data/data.trig" ``` - ***Property: store.type*** The type of the store must be set to "memory" @@ -35,7 +34,6 @@ The type of the store must be set to "memory" ***Property: store.authorization*** - | Category | Value | |--- | ---: | | Default | REWRITE_FROM | @@ -55,4 +53,3 @@ list of files in file URI scheme | Required | false | | Valid values | A list of files | | Environment | STORE_MEMORY_FILES | - diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-neptune-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-neptune-full.md index 878ee7513..d3c21fdbf 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-neptune-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-neptune-full.md @@ -7,7 +7,7 @@ tags: Configuration example: -This example configures a connection to a neptune instance in the AWS region eu-central-1. Authentication is enabled +This example configures a connection to a neptune instance in the AWS region eu-central-1. Authentication is enabled so it is assumed that CMEM runs on a EC2 VM with configured role for authentication to neptune. Files (uncompressed) greater than 100MB are uploaded via S3 based bulk loader. The S3 bucket is accessed in this case via an access point which is configured here. The EC2 role CMEM runs under has write access to the bucket. One of the role the neptune cluster runs under is configured in this setting and has read access to the bucket. @@ -34,7 +34,6 @@ store: management.tracing.enabled: false ``` - ***Property: store.type*** The type of the store must be set to "neptune" @@ -74,7 +73,7 @@ Settings for the connection to the Amazon Cloud ***Property: store.neptune.aws.region*** -The region where the neptune instance is located i.e. "eu-central-1" s. 
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions +The region where the neptune instance is located i.e. "eu-central-1" s. | Category | Value | |--- | ---: | @@ -85,7 +84,7 @@ The region where the neptune instance is located i.e. "eu-central-1" s. https:// ***Property: store.neptune.aws.authEnabled*** -Whether the neptune instance is configured with enabled IAM authentication. In case of enabled authentication the credentials need to be accessible to the JVM of the dataplatform. Deployment on EC2 and assigning a role to the VM is sufficient. Other ways to achieve this are described in https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html +Whether the neptune instance is configured with enabled IAM authentication. In case of enabled authentication the credentials need to be accessible to the JVM of the dataplatform. Deployment on EC2 and assigning a role to the VM is sufficient. Other ways to achieve this are described in | Category | Value | |--- | ---: | @@ -94,7 +93,7 @@ Whether the neptune instance is configured with enabled IAM authentication. In c | Valid values | boolean | | Environment | STORE_NEPTUNE_AWS_AUTHENABLED | -Settings for S3 bucket connection and upload of large files to the neptune instance. The neptune store blocks all HTTP requests with size >150MB. To upload larger files a graph file is temporarily stored in a S3 bucket and uploaded via Neptune Bulk Loader. The S3 bucket needs to be in the same region as the neptune cluster. For more information s. https://docs.aws.amazon.com/neptune/latest/userguide/bulk-load.html. If no S3 upload is necessary then the limit of 150 MB on HTTPS uploads apply for neptune. The whole section can be left out of the configuration. +Settings for S3 bucket connection and upload of large files to the neptune instance. The neptune store blocks all HTTP requests with size >150MB. 
To upload larger files a graph file is temporarily stored in a S3 bucket and uploaded via Neptune Bulk Loader. The S3 bucket needs to be in the same region as the neptune cluster. For more information s. . If no S3 upload is necessary then the limit of 150 MB on HTTPS uploads apply for neptune. The whole section can be left out of the configuration. ***Property: store.neptune.s3.bucketNameOrAPAlias*** @@ -109,7 +108,7 @@ The name of the bucket or access point -> the role CMEM runs under needs write a ***Property: store.neptune.s3.iamRoleArn*** -The name of the role the neptune loader accesses the bucket -> the role needs read access to the bucket s. https://docs.aws.amazon.com/neptune/latest/userguide/bulk-load-tutorial-IAM.html +The name of the role the neptune loader accesses the bucket -> the role needs read access to the bucket s. | Category | Value | |--- | ---: | @@ -139,4 +138,3 @@ The degree of parallelism (CPU) for the neptune loader, possible values are LOW, | Required | false | | Valid values | LOW, MEDIUM, HIGH, OVERSUBSCRIBE | | Environment | STORE_NEPTUNE_S3_BULKLOADPARALLELISM | - diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-oauth-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-oauth-full.md index 8fbf3442b..3128cc80e 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-oauth-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-oauth-full.md @@ -20,14 +20,12 @@ spring: issuerUri: http://keycloak/auth/realms/cmem ``` - #### OAuth 2.0 Resource Server In order to protect access to it’s resources, DataPlatform acts as an OAuth 2.0 resource server accepting and responding to a protected resource request using a JSON Web Token (JWT). The OAuth 2.0 specification as well as the JSON Web Token specification don’t define any mandatory claims to be contained in a JWT access token. 
However, if the property spring.security.oauth2.resourceserver.jwt.issuer-uri is set, the iss (issuer) claim is required to be contained in the JWT. It’s value must be equal to the configured issuer URI. Additionally, in order to identify the requesting principal, either the username claim or the clientId claim must be contained in the JWT. - ***Property: spring.security.oauth2.resourceserver.anonymous*** Use this property to allow anonymous access to protected resources. @@ -46,10 +44,9 @@ If this property is set, the iss (issuer) claim is required to be contained in t **Note:** If the authorization server is down when DataPlatform queries it (given appropriate timeouts), then startup will fail. Also, if the authorization server doesn’t support the Provider Configuration endpoint, or if DataPlatform must be able to start up independently from the authorization server, use the property jwk-set-uri instead. - | Category | Value | |--- | ---: | -| Default | http://docker.localhost/auth/realms/cmem | +| Default | | | Required | false | | Valid values | URI to OpenID Connect Provider | | Conflicts with | spring.security.oauth2.resourceserver.jwt.jwkSetUri | @@ -104,16 +101,13 @@ Use this property to specify the claim providing the OAuth 2.0 client ID to whic #### OAuth 2.0 client configuration -In order to protect access to it's resources, DataPlatform acts as an OAuth 2.0 Client which provides authentication its own clients by means of a session cookie. For this type of authentication a JSON Web Token (JWT) -is not necessary. The registration which is configured is named "keycloak" and provides a login page redirecting to a keycloak backend. For specific customizations please s. https://docs.spring.io/spring-security/reference/servlet/oauth2/client/index.html - +In order to protect access to it's resources, DataPlatform acts as an OAuth 2.0 Client which provides authentication its own clients by means of a session cookie. 
For this type of authentication a JSON Web Token (JWT) +is not necessary. The registration which is configured is named "keycloak" and provides a login page redirecting to a keycloak backend. For specific customizations please s. One authentication backend is configured named 'keycloak'. The login page is accessible under '{basepath}/oauth2/authorization/keycloak' - ***Property: spring.security.oauth2.client.registration.keycloak.client-id*** - | Category | Value | |--- | ---: | | Default | dataintegration | @@ -123,7 +117,6 @@ One authentication backend is configured named 'keycloak'. The login page is acc ***Property: spring.security.oauth2.client.registration.keycloak.authorization-grant-type*** - | Category | Value | |--- | ---: | | Default | authorization_code | @@ -133,7 +126,6 @@ One authentication backend is configured named 'keycloak'. The login page is acc ***Property: spring.security.oauth2.client.registration.keycloak.client-authentication-method*** - | Category | Value | |--- | ---: | | Default | basic | @@ -143,7 +135,6 @@ One authentication backend is configured named 'keycloak'. The login page is acc ***Property: spring.security.oauth2.client.registration.keycloak.redirectUri*** - | Category | Value | |--- | ---: | | Default | {baseUrl}/login/oauth2/code/{registrationId} | @@ -153,7 +144,6 @@ One authentication backend is configured named 'keycloak'. The login page is acc ***Property: spring.security.oauth2.client.registration.keycloak.scope*** - | Category | Value | |--- | ---: | | Default | [openid, profile, email] | @@ -163,21 +153,18 @@ One authentication backend is configured named 'keycloak'. 
The login page is acc ***Property: spring.security.oauth2.client.registration.keycloak.provider.keycloak.issuer-uri*** - | Category | Value | |--- | ---: | -| Default | http://docker.localhost/auth/realms/cmem | +| Default | | | Required | false | | Valid values | string | | Environment | SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_KEYCLOAK_PROVIDER_KEYCLOAK_ISSUER_URI | ***Property: spring.security.oauth2.client.registration.keycloak.provider.keycloak.user-name-attribute*** - | Category | Value | |--- | ---: | | Default | preferred_username | | Required | false | | Valid values | string | | Environment | SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_KEYCLOAK_PROVIDER_KEYCLOAK_USER_NAME_ATTRIBUTE | - diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-virtuoso-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-virtuoso-full.md index 6b6b43e81..873286ce2 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-virtuoso-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-virtuoso-full.md @@ -7,7 +7,7 @@ tags: Configuration example: -This example configures a connection with HTTPS to a remote Virtuoso store (https://remote:8080). +This example configures a connection with HTTPS to a remote Virtuoso store (). 
```yaml store: @@ -22,7 +22,6 @@ store: databasePort: 1111 ``` - ***Property: store.type*** The type of the store must be set to "virtuoso" @@ -101,4 +100,3 @@ The credentials of the given user | Required | false | | Valid values | string | | Environment | STORE_VIRTUOSO_PASSWORD | - diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/index.md b/docs/deploy-and-configure/configuration/explore/dataplatform/index.md index 26645d1b6..669eb0af2 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/index.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/index.md @@ -11,11 +11,11 @@ To use this manual, system administrators should have knowledge about Linux (Ubu The following subsections describe different configuration topics in detail. Every subsection is presented with a property key overview and a details section with additional explanations: -- [Explore backend general configuration](application-full.md) -- [OAuth specific configuration](application-oauth-full.md) -- Triple Store specific configuration - - [Ontotext GraphDB](application-graphdb-full.md) - - [HTTP](application-http-full.md) - - [In-Memory](application-inmemory-full.md) - - [AWS Neptune](application-neptune-full.md) - - [Openlink Virtuoso](application-virtuoso-full.md) +- [Explore backend general configuration](application-full.md) +- [OAuth specific configuration](application-oauth-full.md) +- Triple Store specific configuration + - [Ontotext GraphDB](application-graphdb-full.md) + - [HTTP](application-http-full.md) + - [In-Memory](application-inmemory-full.md) + - [AWS Neptune](application-neptune-full.md) + - [Openlink Virtuoso](application-virtuoso-full.md) diff --git a/docs/deploy-and-configure/configuration/explore/graph-resource-pattern/index.md b/docs/deploy-and-configure/configuration/explore/graph-resource-pattern/index.md index d7aeeec5d..cfb39ecea 100644 --- 
a/docs/deploy-and-configure/configuration/explore/graph-resource-pattern/index.md +++ b/docs/deploy-and-configure/configuration/explore/graph-resource-pattern/index.md @@ -211,5 +211,5 @@ A valid configuration must use a `subjectVarName` called `resource`. This is the This configuration produces the following result, it only shows results where: -- `resource` is of type `http://example.com/vocab/Company` -- a `subResource` exists which is related to `resource` via the `http://example.com/vocab/hasParent` property +- `resource` is of type `http://example.com/vocab/Company` +- a `subResource` exists which is related to `resource` via the `http://example.com/vocab/hasParent` property diff --git a/docs/deploy-and-configure/configuration/explore/index.md b/docs/deploy-and-configure/configuration/explore/index.md index 954b1d8a3..6c32a3f57 100644 --- a/docs/deploy-and-configure/configuration/explore/index.md +++ b/docs/deploy-and-configure/configuration/explore/index.md @@ -8,27 +8,26 @@ tags: This page describes how to configure the eccenca Explore component which is done in two parts: -1. The Explore frontend (DataManager) is configured visually through the :eccenca-module-workspace-configuration: _Workspace configuration_ module. -2. [The Explore backend (DataPlatform)](dataplatform/index.md), all details are described on the respective sub-page +1. The Explore frontend (DataManager) is configured visually through the :eccenca-module-workspace-configuration: _Workspace configuration_ module. +2. [The Explore backend (DataPlatform)](dataplatform/index.md), all details are described on the respective sub-page eccenca Explore frontend (DataManager) is a single-page JavaScript application, which means the application consists of a single HTML page which loads all needed web resources in the browser after loading the page itself. 
In the context of Explore frontend (DataManager), these web resources are: -- The application including its configuration (`app*.js`, `config.js`) -- Styles (`*.css`) -- Web fonts for typography as well as for icons (`*.woff`, `*.ttf`, `*.eot`) -- Images (e.g. logos) (`*.png`, `*.svg`) +- The application including its configuration (`app*.js`, `config.js`) +- Styles (`*.css`) +- Web fonts for typography as well as for icons (`*.woff`, `*.ttf`, `*.eot`) +- Images (e.g. logos) (`*.png`, `*.svg`) Explore frontend (DataManager) communicates with different API endpoints in order to retrieve and manipulate data. The features of Explore frontend (DataManager) include: -- Dataset Manager to create and update datasets and its meta data -- Vocabulary Manager to install and remove Vocabulary descriptions -- Data browser to explore and manage graph-based data -- Taxonomy Editor to manage and create SKOS based taxonomies -- Query editor to query graph-based data via SPARQL queries -- Access control -- Compliance of W3C standards such as [RDF](https://www.w3.org/standards/techs/rdf#w3c_all), [Linked Data](https://www.w3.org/standards/techs/linkeddata#w3c_all) and [SPARQL](https://www.w3.org/standards/techs/sparql#w3c_all) - +- Dataset Manager to create and update datasets and its meta data +- Vocabulary Manager to install and remove Vocabulary descriptions +- Data browser to explore and manage graph-based data +- Taxonomy Editor to manage and create SKOS based taxonomies +- Query editor to query graph-based data via SPARQL queries +- Access control +- Compliance of W3C standards such as [RDF](https://www.w3.org/standards/techs/rdf#w3c_all), [Linked Data](https://www.w3.org/standards/techs/linkeddata#w3c_all) and [SPARQL](https://www.w3.org/standards/techs/sparql#w3c_all) diff --git a/docs/deploy-and-configure/configuration/graphinsights/index.md b/docs/deploy-and-configure/configuration/graphinsights/index.md index adca1410c..044555f50 100644 --- 
a/docs/deploy-and-configure/configuration/graphinsights/index.md +++ b/docs/deploy-and-configure/configuration/graphinsights/index.md @@ -62,8 +62,8 @@ See [Configure OAuth clients (helm)](../../../deploy-and-configure/configuration The configuration mentioned below is rendered with those files, but you usually don't have to touch those: -- `configuration-files/explore-application.yml` for Explore -- `configuration-files/cmem.integration.config.yml` for Graph Insights +- `configuration-files/explore-application.yml` for Explore +- `configuration-files/cmem.integration.config.yml` for Graph Insights For more details please have a look in the helm value file. Every configuration is documented there. @@ -242,8 +242,8 @@ There is the need for a client to authenticate in a browser and a second client For convenience, by default we use the same clients as for the rest of the application: -- Client for browser: cmem -- Client for component communication: cmem-service-account +- Client for browser: cmem +- Client for component communication: cmem-service-account In case you want to have separate clients for production deployments, this chapter is for you. @@ -309,12 +309,12 @@ separate clients are already created when starting the Postgresql container. However, regenerating a new client secret is advisable: -- Select the `eccenca Corporate Memory`-realm -- Select `Clients` -- Select the client `graph-insights-service-account`. -- Select the tab `Credentials` -- Press `Regenerate` -- Then copy the new secret and fill in the values from above. +- Select the `eccenca Corporate Memory`-realm +- Select `Clients` +- Select the client `graph-insights-service-account`. +- Select the tab `Credentials` +- Press `Regenerate` +- Then copy the new secret and fill in the values from above. 
![Keycloak client secret regenerate](keycloak-client-secret-regenerate.png) @@ -325,17 +325,17 @@ In an already running deployment you would have to create those clients on your We assume that we name the clients as following: -- Browser client: `graph-insights` -- Inter-component client: `graph-insights-service-account` +- Browser client: `graph-insights` +- Inter-component client: `graph-insights-service-account` #### Set Backchannel logout URL for Graph Insights You would follow the same steps as in [Keycloak Client configuration](../../../deploy-and-configure/configuration/keycloak/index.md) but name them differently, e.g. as above. In addition, one last step is missing: To have the logout working properly you have to add a client Backchannel logout URL for the client meant for browser authentication (`graph-insights`): -- Select the `eccenca Corporate Memory`-realm -- Select `Clients` -- Select the client `graph-insights`. -- Scroll down to this section and add this: `https:///graphinsights/logout/connect/back-channel/keycloak` +- Select the `eccenca Corporate Memory`-realm +- Select `Clients` +- Select the client `graph-insights`. 
+- Scroll down to this section and add this: `https:///graphinsights/logout/connect/back-channel/keycloak` ![Keycloak backchannel LogExplore select_graphinsights](keycloak-client-backchannel.png) diff --git a/docs/deploy-and-configure/configuration/keycloak/change-passwords-and-keys/index.md b/docs/deploy-and-configure/configuration/keycloak/change-passwords-and-keys/index.md index b5b32d956..8eed7ed7a 100644 --- a/docs/deploy-and-configure/configuration/keycloak/change-passwords-and-keys/index.md +++ b/docs/deploy-and-configure/configuration/keycloak/change-passwords-and-keys/index.md @@ -75,4 +75,3 @@ The following make targets will create a database dump, store it in `data/backu ```shell-session make keycloak-backup keycloak-restore ``` - diff --git a/docs/deploy-and-configure/configuration/keycloak/index.md b/docs/deploy-and-configure/configuration/keycloak/index.md index 2eee49ecb..975066327 100644 --- a/docs/deploy-and-configure/configuration/keycloak/index.md +++ b/docs/deploy-and-configure/configuration/keycloak/index.md @@ -26,23 +26,23 @@ The screenshots displayed in this documentation were taken from Keycloak v20 usi To create a realm, use the drop down menu for choosing a realm on the left side. -- Create a realm `cmem` +- Create a realm `cmem` - Select **Realm settings** - - **General** tab: - - Change HTML Display name to `Corporate Memory` + - **General** tab: + - Change HTML Display name to `Corporate Memory` - **Themes** tab - - Switch realm's login theme to `eccenca` - - Switch realm's account theme to `eccenca` + - Switch realm's login theme to `eccenca` + - Switch realm's account theme to `eccenca` ## Client configuration Clients are used to link users and groups managed in Keycloak to Corporate Memory. There are two different types of clients used by Corporate Memory: -- The first client is used to authenticate a user for using the web interface (usually named `cmem`). 
-- The second client is used as a technical user with the command line interface (usually named `cmem-service-account`). -- (optional, when using Graph Insights) similar to the `cmem` client you can configure Graph Insights to use a separate client (usually named `graph-insights`) -- (optional, when using Graph Insights) similar to the `cmem-service-account` client you can configure Graph Insights to communicate with a separate client (usually named `graph-insights-service-account`) +- The first client is used to authenticate a user for using the web interface (usually named `cmem`). +- The second client is used as a technical user with the command line interface (usually named `cmem-service-account`). +- (optional, when using Graph Insights) similar to the `cmem` client you can configure Graph Insights to use a separate client (usually named `graph-insights`) +- (optional, when using Graph Insights) similar to the `cmem-service-account` client you can configure Graph Insights to communicate with a separate client (usually named `graph-insights-service-account`) For Graph Insight please refer the [Graph Insights OAUTH documentation](../../../deploy-and-configure/configuration/graphinsights/index.md#oauth-configuration) Depending on the environment, there might be other use cases, when running background schedules, then a third client, also as technical user, might be useful. @@ -53,11 +53,11 @@ Corporate Memory uses access conditions which are related to users or groups. This is described at [Access Conditions](./../access-conditions/index.md). To use groups from Keycloak in Corporate Memory access conditions, all Keycloak client configurations need to have attached mappers: -- For the web interface client (`cmem`), the user groups need to get attached to the client. +- For the web interface client (`cmem`), the user groups need to get attached to the client. This is done by a **Group Membership** mapper (described below). 
With this mapper each group of a user is assigned for the authentication process, so Corporate Memory is aware of the user and group IDs for setting up access conditions. -- For the technical account clients (such as `cmem-service-account`), Keycloak does not allow to add groups directly to a client. +- For the technical account clients (such as `cmem-service-account`), Keycloak does not allow to add groups directly to a client. To work around this limitation, we are using **ROLES** instead. By creating a mapper to re-define roles from groups, we allow Corporate Memory to read roles as groups attached to the client token. @@ -73,11 +73,11 @@ To import a pre-configured `cmem` client for using the web interface, follow the ![Dialog import cmem client](import-client-cmem.png) -- Login to Keycloak and select the Corporate Memory realm (`cmem`). -- Download the [client configuration for using the web interface](cmem.json) (`cmem.json`). -- Select **Clients**, then **Import client**. -- **Browse** for the downloaded `cmem.json` and select it. -- **Save** new client. +- Login to Keycloak and select the Corporate Memory realm (`cmem`). +- Download the [client configuration for using the web interface](cmem.json) (`cmem.json`). +- Select **Clients**, then **Import client**. +- **Browse** for the downloaded `cmem.json` and select it. +- **Save** new client. To import a pre-configured `cmem-service-account` client, repeat the process with the [client configuration with credentials for the technical account (`cmem-service-account`)](cmem-service-account.json) (`cmem-service-account.json`). After importing add the `elds-admins` role mapper to the client. See in the manual section of [Add the `cmem-service-account` client](#serviceaccountroles) @@ -89,45 +89,45 @@ This client is intended for the usage with Explore and Build (DataIntegration) ( ![Dialog create cmem client](createClient_1.png){ class="bordered" } -- **Client type**: OpenID Connect -- **Client ID**: i.e. 
`cmem`, you need to remember this and use this later -- **Name** and **Description**: fill as you like -- Select **Next** -- **Client authentication**: Off -- **Authorization**: Off -- Enable **Standard Flow Enabled** (enables OAuth 2.0 Authorization Code Flow) -- Before v23.1: - - Additionally enable **Implicit Flow Enabled** -- **Save** +- **Client type**: OpenID Connect +- **Client ID**: i.e. `cmem`, you need to remember this and use this later +- **Name** and **Description**: fill as you like +- Select **Next** +- **Client authentication**: Off +- **Authorization**: Off +- Enable **Standard Flow Enabled** (enables OAuth 2.0 Authorization Code Flow) +- Before v23.1: + - Additionally enable **Implicit Flow Enabled** +- **Save** ![Dialog create cmem client](createClient_2.png){ class="bordered" } The dialog above closes and you land on the configuration page of this client: -- **Valid redirect URIs**: Add the correct URL pattern (e.g., wildcard `https://cmem.example.net/*` works) to `Valid Redirect URIs` (`*` for testing purposes can be used as well) and **Save** -- Switch the Tabs to **Client scopes** and click on the first scope (i.e.: `cmem-dedicated`) +- **Valid redirect URIs**: Add the correct URL pattern (e.g., wildcard `https://cmem.example.net/*` works) to `Valid Redirect URIs` (`*` for testing purposes can be used as well) and **Save** +- Switch the Tabs to **Client scopes** and click on the first scope (i.e.: `cmem-dedicated`) ![Dialog select cmem-service-account-dedicated](createClient_11.png){ class="bordered" } ![Dialog create mapper](createClient_4.png){ class="bordered" } ![Dialog create mapper](createClient_5.png){ class="bordered" } -- Click **Configure a new mapper** - - Select Mapper Type **Group Membership** - - **Name** `groups` - - **Token Claim Name** `groups` - - Disable **Full group path** - - Disable **Add to ID token** - - Enable **Add to access token** - - Enable **Add to user info** -- **Save** +- Click **Configure a new mapper** + - 
Select Mapper Type **Group Membership** + - **Name** `groups` + - **Token Claim Name** `groups` + - Disable **Full group path** + - Disable **Add to ID token** + - Enable **Add to access token** + - Enable **Add to user info** +- **Save** ![Dialog create mapper](createClient_6.png){ class="bordered" } -- In Corporate Memory configuration until v22.2: - - Configure this client ID under `js.config.workspaces.default.authorization.oauth2.clientId` in DataManager's configuration file (Datamanager needs implicit flow) - - Configure this client ID under `oauth.clientId = "cmem"` in DataManager's configuration file (Dataintegration needs standard flow) -- In Corporate Memory configuration from v23.1: - - Configure this client ID in the environments with the name `OAUTH_CLIENT_ID` in `/environments/config.env` (defaults to `cmem` if not set) +- In Corporate Memory configuration until v22.2: + - Configure this client ID under `js.config.workspaces.default.authorization.oauth2.clientId` in DataManager's configuration file (Datamanager needs implicit flow) + - Configure this client ID under `oauth.clientId = "cmem"` in DataManager's configuration file (Dataintegration needs standard flow) +- In Corporate Memory configuration from v23.1: + - Configure this client ID in the environments with the name `OAUTH_CLIENT_ID` in `/environments/config.env` (defaults to `cmem` if not set) #### Add the `cmem-service-account` client @@ -140,75 +140,75 @@ For this, just create a different role name later, and create an access conditio ![Dialog create role](createClient_7_2.png){ class="bordered" } ![Dialog create role](createClient_7_3.png){ class="bordered" } -- **Client type**: OpenID Connect -- **Client ID**: i.e. 
`cmem-service-account`, you need to remember this and use this later -- **Name** and **Description**: fill as you like -- click **Next** -- **Client authentication**: On -- **Authorization**: Off -- **Authentication flow**: only enable `Service accounts roles`, the rest can be disabled -- **Save** +- **Client type**: OpenID Connect +- **Client ID**: i.e. `cmem-service-account`, you need to remember this and use this later +- **Name** and **Description**: fill as you like +- click **Next** +- **Client authentication**: On +- **Authorization**: Off +- **Authentication flow**: only enable `Service accounts roles`, the rest can be disabled +- **Save** -- Go to **Credentials** and configure **Client Id and Secret**, copy the client secret for later usage +- Go to **Credentials** and configure **Client Id and Secret**, copy the client secret for later usage ![Dialog create role](createClient_7_4.png){ class="bordered" } -- Go to **Roles** and click **Create role** to create the `elds-admins` role +- Go to **Roles** and click **Create role** to create the `elds-admins` role ![Dialog create role](createClient_7.png){ class="bordered" } ![Dialog create role](createClient_8.png){ class="bordered" } -- Click **Action** and select **Add associated roles** +- Click **Action** and select **Add associated roles** ![Dialog create role](createClient_9.png){ class="bordered" } -- Select **Filter by client** from the filter pull-down-menu +- Select **Filter by client** from the filter pull-down-menu ![Dialog create role](createClient_10.png){ class="bordered" } -- In this dialog select the client by name which you are currently configuring (here `cmem-service-account`) and then **Assign** +- In this dialog select the client by name which you are currently configuring (here `cmem-service-account`) and then **Assign** ![Dialog create role](createClient_10_1.png){ class="bordered" } -- Go back to **Client details** e.g., by using the top navigation -- In the **Roles** tab you now see 
your created role here +- Go back to **Client details** e.g., by using the top navigation +- In the **Roles** tab you now see your created role here ![Dialog create role](createClient_10_2.png){ class="bordered" } -- Switch the Tabs to **Client scopes** and click on the first scope (i.e.: `cmem-service-account-dedicated`) +- Switch the Tabs to **Client scopes** and click on the first scope (i.e.: `cmem-service-account-dedicated`) ![Dialog create mapper](createClient_11.png){ class="bordered" } -- select **Add mapper** -> **By configuration** +- select **Add mapper** -> **By configuration** ![Dialog create mapper](createClient_13_1.png){ class="bordered" } -- select Mapper Type `User Client Role` - - **Name** `roles` - - **Client ID** select the client you are currently configuring from the pull-down-menu (here `cmem-service-account`) - - Enable **Multivalued** - - **Token Claim Name** `groups` - - Enable **Add to ID token** - - Enable **Add to access token** - - Enable **Add to user info** -- **Save** +- select Mapper Type `User Client Role` + - **Name** `roles` + - **Client ID** select the client you are currently configuring from the pull-down-menu (here `cmem-service-account`) + - Enable **Multivalued** + - **Token Claim Name** `groups` + - Enable **Add to ID token** + - Enable **Add to access token** + - Enable **Add to user info** +- **Save** ![Dialog create mapper](createClient_13.png){ class="bordered" } ![Dialog create mapper](createClient_14.png){ class="bordered" } -- After **Save** go back to **Client details** -- Go to **Service account roles** tab -- Select the link in the center **To manage detail and group mappings, click on the username service-account-YOUR_CLIENT_ID** +- After **Save** go back to **Client details** +- Go to **Service account roles** tab +- Select the link in the center **To manage detail and group mappings, click on the username service-account-YOUR_CLIENT_ID** ![Dialog add role to client](createClient_15.png){ class="bordered" } 
-- Go to tab **Role mapping** and select **Assign role** +- Go to tab **Role mapping** and select **Assign role** ![Dialog add role to client](createClient_16.png){ class="bordered" } -- Change the filter to **Filter by clients** and select the new Client ID, i.e `cmem-service-account` -- Click **Assign** +- Change the filter to **Filter by clients** and select the new Client ID, i.e `cmem-service-account` +- Click **Assign** ![Dialog add role to client](createClient_16_2.png){ class="bordered" } ![Dialog add role to client](createClient_16_1.png){ class="bordered" } @@ -216,8 +216,8 @@ For this, just create a different role name later, and create an access conditio ## Corporate Memory configuration after setting up clients -- If **Build (DataIntegration)** schedulers are required, configure this client id and secret under the properties `workbench.superuser.client` and `workbench.superuser.clientSecret` in Build (DataIntegration)'s configuration file or -- in `docker compose`-orchestration you can edit this in the environment as: +- If **Build (DataIntegration)** schedulers are required, configure this client id and secret under the properties `workbench.superuser.client` and `workbench.superuser.clientSecret` in Build (DataIntegration)'s configuration file or +- in `docker compose`-orchestration you can edit this in the environment as: ``` bash CMEM_SERVICE_ACCOUNT_CLIENT_ID=cmem-service-account @@ -226,27 +226,27 @@ For this, just create a different role name later, and create an access conditio DATAINTEGRATION_CMEM_SERVICE_CLIENT_SECRET=YourSecret ``` -- in helm this value is defined by: +- in helm this value is defined by: ``` yaml DATAINTEGRATION_CMEM_SERVICE_CLIENT_SECRET: {{ .Values.global.cmemClientSecret }} DATAINTEGRATION_CMEM_SERVICE_CLIENT: {{ .Values.global.cmemClientId }} ``` -- For **cmemc** you can configure this with `OAUTH_CLIENT_ID` and `OAUTH_CLIENT_SECRET`. 
+- For **cmemc** you can configure this with `OAUTH_CLIENT_ID` and `OAUTH_CLIENT_SECRET`. ### Groups configuration -- Go to **Groups** and add the following groups: - - `elds-admins` - - These groups are used only to assign them to user accounts (clients have roles-to-group mappers). - - Any groups provided by your user management system (e.g. LDAP) that must be recognized/mapped by Keycloak - - Corporate Memory does not come with any other groups. Those are optional and can be defined here. +- Go to **Groups** and add the following groups: + - `elds-admins` + - These groups are used only to assign them to user accounts (clients have roles-to-group mappers). + - Any groups provided by your user management system (e.g. LDAP) that must be recognized/mapped by Keycloak + - Corporate Memory does not come with any other groups. Those are optional and can be defined here. ### Users configuration -- This applies to the [Docker Orchestration](./../docker-orchestration/index.md), for other setups consult the [Keycloak manual](https://www.keycloak.org/docs/latest/server_admin/). -- Go to `Users` -- Add the following users and assign their groups respectively (for each user go to credentials, add password and disable `Temporary`) - - `admin:admin` - - groups: `elds-admins` +- This applies to the [Docker Orchestration](./../docker-orchestration/index.md), for other setups consult the [Keycloak manual](https://www.keycloak.org/docs/latest/server_admin/). 
+- Go to `Users` +- Add the following users and assign their groups respectively (for each user go to credentials, add password and disable `Temporary`) + - `admin:admin` + - groups: `elds-admins` diff --git a/docs/deploy-and-configure/configuration/keycloak/using-external-keycloak/index.md b/docs/deploy-and-configure/configuration/keycloak/using-external-keycloak/index.md index fb3e98880..9dc583584 100644 --- a/docs/deploy-and-configure/configuration/keycloak/using-external-keycloak/index.md +++ b/docs/deploy-and-configure/configuration/keycloak/using-external-keycloak/index.md @@ -19,9 +19,9 @@ For this scenario, this page provides additional configuration requirements. Depending on your infrastructure around Corporate Memory, you need to change some provisioned HTTP header on the following services: - Headers for Keycloak URLs: - - `Access-Control-Allow-Origin: https://cmem.example.com` + - `Access-Control-Allow-Origin: https://cmem.example.com` - Headers for Corporate Memory URLs: - - `Access-Control-Allow-Origin: *` + - `Access-Control-Allow-Origin: *` For example, if you are using our helm charts, adapt the followin ingress annotations: @@ -37,7 +37,6 @@ For example, if you are using our helm charts, adapt the followin ingress annota nginx.ingress.kubernetes.io/cors-allow-origin: "*" ``` - ### Keycloak You have to allow the Corporate Memory domain in the Keycloak settings. 
@@ -122,4 +121,3 @@ In either way you can configure the base realm path in the global value section: # keycloak oauth client id (used for DataPlatform connection and DataIntegration cmem service client) oauthClientId: cmem ``` - diff --git a/docs/deploy-and-configure/configuration/label-resolution-and-full-text-search/index.md b/docs/deploy-and-configure/configuration/label-resolution-and-full-text-search/index.md index 909b8d569..4d9d1a17e 100644 --- a/docs/deploy-and-configure/configuration/label-resolution-and-full-text-search/index.md +++ b/docs/deploy-and-configure/configuration/label-resolution-and-full-text-search/index.md @@ -49,12 +49,11 @@ How labels are resolved is best explained using these default settings and some - For `:Resource1` the label will be `Leipzig` as the english `rdfs:label` will be retrieved. - For `:Resource2` the label cannot be retrieved from the Knowledge Graph since no known property is used. Hence the fallback. - For `:Resource3` the label will be retrieved as `Stuttgart`, if the `languagePreferencesAnyLangFallback` is `true`. - - While there is a well-known property used, none of the used languages match. Using the fallback, the alphabetically first match is retrieved in this case. + - While there is a well-known property used, none of the used languages match. Using the fallback, the alphabetically first match is retrieved in this case. - For `:Resource4` multiple label candidates could be determined. - - In this case, `Another Label for Hanover` is retrieved as it is the first value in the alphanumerical comparison. + - In this case, `Another Label for Hanover` is retrieved as it is the first value in the alphanumerical comparison. ## Client API The label resolution functionality can also be used by client systems. This functionality is exposed as an [API endpoint](../../../develop/dataplatform-apis/index.md) (`/api/explore/title`). 
- diff --git a/docs/deploy-and-configure/configuration/production-ready-settings/index.md b/docs/deploy-and-configure/configuration/production-ready-settings/index.md index 4cfe49451..cc6942189 100644 --- a/docs/deploy-and-configure/configuration/production-ready-settings/index.md +++ b/docs/deploy-and-configure/configuration/production-ready-settings/index.md @@ -29,6 +29,7 @@ For Explore backend (DataPlatform) you set this in `application.yml` or as envir ```yaml deploy.post-logout-redirect-uri: "${DEPLOY_BASE_URL}" ``` + ```bash DEPLOY_POST_LOGOUT_REDIRECT_URI=${DEPLOY_BASE_URL} ``` @@ -50,7 +51,6 @@ oauth.logoutRedirectUrl = ${OAUTH_LOGOUT_REDIRECT_URL} oauth.idToken = true ``` - ## Password Policies If you create users in Keycloak, make sure these users have strong passwords. @@ -93,7 +93,6 @@ play.http.session.secure = ${DATAINTEGRATION_SECURE_COOKIE} In the [Play documentation](https://www.playframework.com/documentation/2.8.x/SettingsSession), you can find further information, i.e. also setting `sameSite = "lax"`or `strict`. By default Build (DataIntegration) sets this to `lax` - ## CORS Settings ### Explore backend (DataPlatform) @@ -145,4 +144,3 @@ cors.config.allowOrigins = "*" # If set to true, allowOrigins must not have '*' configured. cors.config.allowCredentials = false ``` - diff --git a/docs/deploy-and-configure/configuration/quad-store-configuration/index.md b/docs/deploy-and-configure/configuration/quad-store-configuration/index.md index d256176bc..18171cf2f 100644 --- a/docs/deploy-and-configure/configuration/quad-store-configuration/index.md +++ b/docs/deploy-and-configure/configuration/quad-store-configuration/index.md @@ -13,7 +13,7 @@ Ensure to add the suggested parameters to the corresponding subsections of the c ### Compatibility -- **Virtuoso 7.2.4.2** - Explore is compatible with [Virtuoso 7.2.4.2](https://github.com/openlink/virtuoso-opensource/releases/tag/v7.2.4.2). Compatibility with other versions is not guaranteed. 
+- **Virtuoso 7.2.4.2** - Explore is compatible with [Virtuoso 7.2.4.2](https://github.com/openlink/virtuoso-opensource/releases/tag/v7.2.4.2). Compatibility with other versions is not guaranteed. ### Configuration @@ -66,36 +66,36 @@ This section only covers limitations and options which have a direct impact on t ### Compatibility -- **Stardog 7.2.1** - Explore is compatible with [Stardog version 7.1.1](http://docs.stardog.com/). Compatibility with newer versions is not guaranteed. -- Stardog 6.2.3 (deprecated) - Explore is compatible with [Stardog version 6.2.3](https://www.stardog.com/docs/6.2.3/). +- **Stardog 7.2.1** - Explore is compatible with [Stardog version 7.1.1](http://docs.stardog.com/). Compatibility with newer versions is not guaranteed. +- Stardog 6.2.3 (deprecated) - Explore is compatible with [Stardog version 6.2.3](https://www.stardog.com/docs/6.2.3/). !!! note Support for 6.2.3 is deprecated and will be removed in later Explore releases. ### Configuration -- **Search enabled**\ +- **Search enabled**\ Explore relies on the Stardog Semantic Search, which has to be enabled by setting: - - `search.enabled=true`\ + - `search.enabled=true`\ You can set this property using either Stardog Studio or the `stardog-admin` commands.\ Refer to the [Stardog documentation](https://www.stardog.com/docs/#_configuration_options) for more detailed information. -- **Server side named graph security**\ +- **Server side named graph security**\ If the `PROVISIONED` access control strategy is used for the configured endpoint, you have to set the property `security.named.graphs=true` for the configured database as explained in the [Stardog documentation](http://docs.stardog.com/#_named_graph_security). Additionally, the following properties are required: - - `password.length.max`: For the provisioned mode to work properly. This property should have a value of at least 64. 
- - `password.regex`: The default value configured in Stardog is not compatible with the passwords generated by Explore. The regex should be `[\\w+\\/=]+` + - `password.length.max`: For the provisioned mode to work properly. This property should have a value of at least 64. + - `password.regex`: The default value configured in Stardog is not compatible with the passwords generated by Explore. The regex should be `[\\w+\\/=]+` -- **SSL support**\ +- **SSL support**\ Mandatory configuration if `sparqlEndpoints.stardog[i].sslEnabled=true`. The server must have a valid certificate which must be trusted by the system where Explore runs. In this case, the `sparqlEndpoints.stardog[i].port` property must point to the SSL port (which default value is `5821`).\ Consult the [Configuring Stardog to use SSL](https://www.stardog.com/docs/#_configuring_stardog_to_use_ssl) section of Stardog's manual for more information on the topic. -- **Query timeout override**\ +- **Query timeout override**\ In order to allow Explore to override the query timeout for individual queries, you have to ensure that the property `query.timeout.override.enabled` for the database is set to `true` (which is the default).\ Consult the [Configuring Query Management](https://www.stardog.com/docs/#_configuring_query_management) section of Stardog's manual for further information. ### Limitations -- **Quad format upload**\ +- **Quad format upload**\ The Graph Store Protocol implementation for Stardog does not support uploading of RDF quad data (TriG, N-Quads). -- **Initial connection**\ +- **Initial connection**\ The first request to Explore can take several seconds due to connection startup to the Stardog server. ### Caveats @@ -166,7 +166,7 @@ This section covers only limitations and options which have a direct impact on t ### Compatibility -- GraphDB 9.2.0 - Explore is compatible with [GraphDB version 9.2.0](http://graphdb.ontotext.com/documentation/standard/release-notes.html#graphdb-9-2-0). 
Compatibility with newer versions is not guaranteed. +- GraphDB 9.2.0 - Explore is compatible with [GraphDB version 9.2.0](http://graphdb.ontotext.com/documentation/standard/release-notes.html#graphdb-9-2-0). Compatibility with newer versions is not guaranteed. ### Configuration diff --git a/docs/deploy-and-configure/configuration/reverse-proxy/index.md b/docs/deploy-and-configure/configuration/reverse-proxy/index.md index 527e520c4..13b040b64 100644 --- a/docs/deploy-and-configure/configuration/reverse-proxy/index.md +++ b/docs/deploy-and-configure/configuration/reverse-proxy/index.md @@ -8,9 +8,9 @@ A reverse proxy forwards all requests from the users to the called service and r Reverse proxy is a necessary component in the Corporate Memory deployment. It enables you to: -- define routes for all the components within one domain name -- expose only ports 80 and 443 to the outside network, all other communication would be performed in the internal network -- ease configuration and management of the SSL certificates +- define routes for all the components within one domain name +- expose only ports 80 and 443 to the outside network, all other communication would be performed in the internal network +- ease configuration and management of the SSL certificates This also enables you to activate the [Linked Data delivery mode](#linked-data-delivery-mode) of Explore. The Linked Data delivery mode is able to serve Linked Data that uses the same namespace as the configured domain name as resolvable URIs including content negotiation. 
@@ -80,9 +80,9 @@ The Linked Data delivery mode is able to serve data that uses the same namespace Therefore you can use the following template (e.g.: ): --  (DataPlatform) --  (a custom vocabulary) -- with HTTPS enforcement (recommended) +-  (DataPlatform) +-  (a custom vocabulary) +- with HTTPS enforcement (recommended) apache sample config for linked data delivery diff --git a/docs/deploy-and-configure/index.md b/docs/deploy-and-configure/index.md index 9164114bd..395e96060 100644 --- a/docs/deploy-and-configure/index.md +++ b/docs/deploy-and-configure/index.md @@ -11,29 +11,28 @@ Deploy and configure eccenca Corporate Memory in your own environment.
-- :eccenca-application-homepage: [System Architecture](system-architecture/index.md) +- :eccenca-application-homepage: [System Architecture](system-architecture/index.md) --- This page describes the overall system architecture of eccenca Corporate Memory and its components. -- :material-exclamation-thick: [Requirements](requirements/index.md) +- :material-exclamation-thick: [Requirements](requirements/index.md) --- This page lists software and hardware requirements for eccenca Corporate Memory deployments. -- :material-download-circle-outline: [Installation](installation/index.md) +- :material-download-circle-outline: [Installation](installation/index.md) --- These pages describe proven deployment scenarios for eccenca Corporate Memory. -- :material-cog-outline: [Configuration](configuration/index.md) +- :material-cog-outline: [Configuration](configuration/index.md) --- These pages describe specific topics on how to configure eccenca Corporate Memory.
- diff --git a/docs/deploy-and-configure/installation/migrating-stores/index.md b/docs/deploy-and-configure/installation/migrating-stores/index.md index 3443685a9..a3930cf26 100644 --- a/docs/deploy-and-configure/installation/migrating-stores/index.md +++ b/docs/deploy-and-configure/installation/migrating-stores/index.md @@ -28,7 +28,7 @@ icon: material/database - best practice: - run all (SELECT) queries in the query catalog and compare results (e.g. with `cmemc`) - - theoretically this could also be applied to INSERT queries (by re-writing into SELECTS in case you want / need to omit altering your graphs) + - theoretically this could also be applied to INSERT queries (by re-writing into SELECTS in case you want / need to omit altering your graphs) - count all triples in all graphs on both instances before/after export/import (`cmemc graph count --all`) ## Optimizing Your Setup diff --git a/docs/deploy-and-configure/installation/scenario-single-node-cloud-installation/index.md b/docs/deploy-and-configure/installation/scenario-single-node-cloud-installation/index.md index fae8e80f9..f35d7de3e 100644 --- a/docs/deploy-and-configure/installation/scenario-single-node-cloud-installation/index.md +++ b/docs/deploy-and-configure/installation/scenario-single-node-cloud-installation/index.md @@ -9,11 +9,11 @@ This page describes a docker-compose based orchestration running on a server ins ## Requirements -- ssh access to a server instance (Debian 11) with a public IP address -- A resolvable domain name to this server -- Terminal with ssh client installed locally -- An eccenca partner account for the docker registry as well as the release artifact area -- A GraphDB license ([free](https://www.ontotext.com/products/graphdb/) or commercial) +- ssh access to a server instance (Debian 11) with a public IP address +- A resolvable domain name to this server +- Terminal with ssh client installed locally +- An eccenca partner account for the docker registry as well as the release 
artifact area +- A GraphDB license ([free](https://www.ontotext.com/products/graphdb/) or commercial) !!! Info make - do not use version 4.4.1 [→](https://savannah.gnu.org/bugs/?63650) @@ -165,7 +165,6 @@ systemctl start cmem-orchestration If you want to have Graph Insights enabled as well, first you need a license. Then you can simiply run this: - ``` shell mkdir licenses ln -s your-license-file.lic graphinsights.lic @@ -190,7 +189,6 @@ You can now proceed to the :material-arrow-right: [Getting Started](../../../ge Do not forget to change the passwords of your deployment, especially if it is available from the public internet. For this, take a look at [Change Passwords and Keys](../../configuration/keycloak/change-passwords-and-keys/index.md). - ```bash cp /opt/cmem-orchestration/conf/systemd/cmem-orchestration.service \ /etc/systemd/system @@ -233,4 +231,5 @@ zip -r data/backups/2024-07-26_14-15.zip data/backups/keycloak/2024-07-26_14-15. ln -sf 2024-07-26_14-15.zip data/backups/latest.zip ``` + The full backup is now at `data/backups/latest.zip`. diff --git a/docs/deploy-and-configure/requirements/graph-insights-sizing.md b/docs/deploy-and-configure/requirements/graph-insights-sizing.md index 5b53e1be9..e1b06def2 100644 --- a/docs/deploy-and-configure/requirements/graph-insights-sizing.md +++ b/docs/deploy-and-configure/requirements/graph-insights-sizing.md @@ -34,10 +34,10 @@ Since Graph Insights uses caching for performance reasons the latter will increa Disk space consumption can be estimated in close relation to working memory usage: -- On average, **~30 MB per 1 million triples** are required both in memory and on disk. -- In **managed mode**, Graph Insights maintains two index directories (to support hot-swapping during reindexing). +- On average, **~30 MB per 1 million triples** are required both in memory and on disk. +- In **managed mode**, Graph Insights maintains two index directories (to support hot-swapping during reindexing). 
In this case, disk usage is roughly **~60 MB per 1 million triples**. -- Actual disk usage may vary depending on dataset characteristics (e.g., schema, dictionary compression ratio, and indexing options). +- Actual disk usage may vary depending on dataset characteristics (e.g., schema, dictionary compression ratio, and indexing options). For practical planning, disk space can be roughly approximated by scaling the memory requirements listed in the table above with the adjustment for managed mode if applicable. @@ -79,5 +79,5 @@ The deployed exploration operations in our benchmark consisted of multiple queri We ran 847 exploration queries, selected to be representative of the dataset (covering different group node sizes and varying numbers of connected resources). The experiments were conducted across a variety of datasets up to 250M triples: -- **Maximum execution time:** ~10s for 1-2 threads, improved to ~5s with 3 threads, and ~4.4s with 4 or more threads. -- **75th percentile:** ~2.6-2.8s with 1-2 threads, improved to ~1.3s with 4 threads, and ~1s with 6+ threads.- **Beyond 4 threads:** no further improvement in maximum times was observed, though 75th percentile times continued to improve slightly. +- **Maximum execution time:** ~10s for 1-2 threads, improved to ~5s with 3 threads, and ~4.4s with 4 or more threads. +- **75th percentile:** ~2.6-2.8s with 1-2 threads, improved to ~1.3s with 4 threads, and ~1s with 6+ threads.- **Beyond 4 threads:** no further improvement in maximum times was observed, though 75th percentile times continued to improve slightly. 
diff --git a/docs/deploy-and-configure/requirements/index.md b/docs/deploy-and-configure/requirements/index.md index f9257285d..b90726e19 100644 --- a/docs/deploy-and-configure/requirements/index.md +++ b/docs/deploy-and-configure/requirements/index.md @@ -12,21 +12,21 @@ For a general overview of a deployment setup please refer to the [System Archit A minimal single-node deployment for testing/evaluation purposes means: -- no memory consuming linking and transformation workflows, -- nearly no concurrent users. +- no memory consuming linking and transformation workflows, +- nearly no concurrent users. Depending on how much RAM is dedicated to the triple store, Knowledge Graphs up to several million triples can be built and served. -- Operating System / Hardware - - Bare metal server or VM with Debian based linux OS (see [Installation](./../installation/index.md) for details) - - 16 GB RAM - - 100 GB free disk space (10 GB for docker images + data + logs over time) - - docker and docker compose (we deliver an orchestration including all needed components) +- Operating System / Hardware + - Bare metal server or VM with Debian based linux OS (see [Installation](./../installation/index.md) for details) + - 16 GB RAM + - 100 GB free disk space (10 GB for docker images + data + logs over time) + - docker and docker compose (we deliver an orchestration including all needed components) For an example of a single-node installation refer to the following scenarios: -- [Scenario: Local Installation](../installation/scenario-local-installation/index.md) -- [Scenario: Single Node Cloud Installation](../installation/scenario-single-node-cloud-installation/index.md) +- [Scenario: Local Installation](../installation/scenario-local-installation/index.md) +- [Scenario: Single Node Cloud Installation](../installation/scenario-single-node-cloud-installation/index.md) ## Typical Setup @@ -50,16 +50,15 @@ For GraphDB always also have a look at [GraphDB recommendations](https://graphdb 
but on the sum of triples in all Graph Insights Snapshots. Have a look at the cmemc command group [graph insights](../../automate/cmemc-command-line-interface/command-reference/graph/insights/index.md) to manage existing snapshots. - ## Clients ### Browser / Web Client We support all (LTS/ESR) versions of the below listed browsers that are actively supported be the respective publishers: -- Microsoft Edge > v88.0 -- Google Chrome or Chromium > v92.0 -- Firefox > v78.0 +- Microsoft Edge > v88.0 +- Google Chrome or Chromium > v92.0 +- Firefox > v78.0 !!! note diff --git a/docs/deploy-and-configure/system-architecture/index.md b/docs/deploy-and-configure/system-architecture/index.md index ecbb656e7..a44006f63 100644 --- a/docs/deploy-and-configure/system-architecture/index.md +++ b/docs/deploy-and-configure/system-architecture/index.md @@ -15,13 +15,13 @@ This page describes the overall system architecture of eccenca Corporate Memory eccenca Corporate Memory consists of three core components: -- [eccenca Build](../configuration/dataintegration/index.md) -- [eccenca Explore](../configuration/explore/index.md), -- [cmemc (Corporate Memory Control)](../../automate/cmemc-command-line-interface/index.md), +- [eccenca Build](../configuration/dataintegration/index.md) +- [eccenca Explore](../configuration/explore/index.md), +- [cmemc (Corporate Memory Control)](../../automate/cmemc-command-line-interface/index.md), and one optional component: -- [eccenca Graph Insights](../../explore-and-author/graph-exploration/graph-insights/index.md). +- [eccenca Graph Insights](../../explore-and-author/graph-exploration/graph-insights/index.md). _Build_ is the Corporate Memory component which enables integration of datasets into a single consistent knowledge graph. Datasets in their original format are mapped to RDF schemata and then linked to and persisted into a knowledge graph. 
diff --git a/docs/develop/accessing-graphs-with-java-applications/index.md b/docs/develop/accessing-graphs-with-java-applications/index.md index 7031c0fe0..035f6c4a7 100644 --- a/docs/develop/accessing-graphs-with-java-applications/index.md +++ b/docs/develop/accessing-graphs-with-java-applications/index.md @@ -95,4 +95,3 @@ public class HTTPClient { } } ``` - diff --git a/docs/develop/cmemc-scripts/index.md b/docs/develop/cmemc-scripts/index.md index 9bf94e57f..a4dc768a4 100644 --- a/docs/develop/cmemc-scripts/index.md +++ b/docs/develop/cmemc-scripts/index.md @@ -26,7 +26,6 @@ cmemc is published as an Apache 2 licensed open source python package at [pypi.o pip install cmem-cmemc ``` - ## Configure a connection Assuming you have already [configured your cmemc connection](../../automate/cmemc-command-line-interface/configuration/file-based-configuration/index.md), using it in Python scripts is quite easy. @@ -160,4 +159,3 @@ Workflow 'only-input-replaceable' in 'io' Workflow 'only-output' in 'io' Workflow 'only-output-replaceable' in 'io' ``` - diff --git a/docs/develop/cmempy-python-api/index.md b/docs/develop/cmempy-python-api/index.md index 7282dca7b..d412ec563 100644 --- a/docs/develop/cmempy-python-api/index.md +++ b/docs/develop/cmempy-python-api/index.md @@ -30,7 +30,7 @@ The following table lists all processed environment variables: | Variable | Description | Default Value | | ------------------- | ----------------------------------------------------- | ------------------------------------------------------------ | -| CMEM_BASE_URI | Base URL of your Corporate Memory | http://docker.localhost | +| CMEM_BASE_URI | Base URL of your Corporate Memory | | | DI_API_ENDPOINT | Build (Data Integration) API endpoint | CMEM_BASE_URI/dataintegration | | DP_API_ENDPOINT | Explore backend API endpoint | CMEM_BASE_URI/dataplatform | | OAUTH_TOKEN_URI | OAuth 2.0 Token endpoint | CMEM_BASE_URI/auth/realms/cmem/protocol/openid-connect/token | diff --git 
a/docs/develop/dataintegration-apis/index.md b/docs/develop/dataintegration-apis/index.md index d0e6d2cdf..5c7f468c8 100644 --- a/docs/develop/dataintegration-apis/index.md +++ b/docs/develop/dataintegration-apis/index.md @@ -30,4 +30,3 @@ Dependent on the specific API, eccenca Build (DataIntegration) works with the fo | application/xml | [XML Media Types](https://tools.ietf.org/html/rfc7303) | | application/n-triples | [RDF 1.1 N-Triples - A line-based syntax for an RDF graph](https://www.w3.org/TR/n-triples/) | | application/problem+json | [Problem Details for HTTP APIs](https://tools.ietf.org/html/rfc7807) | - diff --git a/docs/develop/dataplatform-apis/index.md b/docs/develop/dataplatform-apis/index.md index 70defe025..e4f18d0e6 100644 --- a/docs/develop/dataplatform-apis/index.md +++ b/docs/develop/dataplatform-apis/index.md @@ -94,8 +94,8 @@ Furthermore, it says: > > The FROM and FROM NAMED keywords allow a query to specify an RDF dataset by reference; they indicate that the dataset should include graphs that are obtained from representations of the resources identified by the given IRIs (i.e. the absolute form of the given IRI references). The dataset resulting from a number of FROM and FROM NAMED clauses is: > -> - a default graph consisting of the RDF merge of the graphs referred to in the FROM clauses, and -> - a set of (IRI, graph) pairs, one from each FROM NAMED clause. +> - a default graph consisting of the RDF merge of the graphs referred to in the FROM clauses, and +> - a set of (IRI, graph) pairs, one from each FROM NAMED clause. > > If there is no FROM clause, but there is one or more FROM NAMED clauses, then the dataset includes an empty graph for the default graph. 
@@ -105,28 +105,28 @@ For this reason, Explore backend **does not allow the manipulation of the servic To enforce this policy, the following restriction applies to incoming [SPARQL 1.1](https://www.w3.org/TR/sparql11-update/) Update queries: -- Update queries (INSERT DATA, DELETE DATA and DELETE/INSERT) targeted against the service's default graph will not be accepted by returning an HTTP 400 Bad Request status code. +- Update queries (INSERT DATA, DELETE DATA and DELETE/INSERT) targeted against the service's default graph will not be accepted by returning an HTTP 400 Bad Request status code. ### Default RDF dataset The interpretation of the RDF dataset of a query differs between various SPARQL service implementations. In the case a query declares no RDF dataset, Explore backend uses the following default RDF dataset declaration to provide a uniform behavior for all supported SPARQL services: -- The default graph is the union ([RDF Merge graph](https://www.w3.org/TR/sparql11-query/#sparqlDataset)) of all named graphs the user is allowed to access. -- The set of named graphs contains all named graphs the user is allowed to access. +- The default graph is the union ([RDF Merge graph](https://www.w3.org/TR/sparql11-query/#sparqlDataset)) of all named graphs the user is allowed to access. +- The set of named graphs contains all named graphs the user is allowed to access. ## HTTP error responses The default format for HTTP error responses is compliant with [RFC 7807 Problem Details for HTTP APIs](https://tools.ietf.org/html/rfc7807). An HTTP error response contains a JSON object that provides at least two fields: -- `title`: A short, human-readable summary of the problem type. -- `detail`: A human-readable explanation specific to this occurrence of the problem. +- `title`: A short, human-readable summary of the problem type. +- `detail`: A human-readable explanation specific to this occurrence of the problem. 
The following optional non-standard fields may also be set: -- `status`: The HTTP status code for this occurrence of the problem. -- `cause`: The cause for this occurrence of the problem. It contains at least the same elements as specified previously, such as `title` and `detail`. +- `status`: The HTTP status code for this occurrence of the problem. +- `cause`: The cause for this occurrence of the problem. It contains at least the same elements as specified previously, such as `title` and `detail`. The following example shows an HTTP response containing JSON problem details using the `application/problem+json` media type: diff --git a/docs/develop/index.md b/docs/develop/index.md index 40561f98c..c31d88f00 100644 --- a/docs/develop/index.md +++ b/docs/develop/index.md @@ -9,19 +9,19 @@ API documentation and programming recipes.
-- :material-language-java: Java +- :material-language-java: Java --- [Accessing Graphs with Java Applications](accessing-graphs-with-java-applications/index.md) covers how to connect to Corporate Memory using a Java program. -- :material-language-python: Python +- :material-language-python: Python --- For Python developers, we offer a [Plugin SDK](python-plugins/index.md) as well as an API for accessing and manipulating Corporate Memory Instances ([cmem-cmempy](cmempy-python-api/index.md)). -- :material-api: OpenAPI specification +- :material-api: OpenAPI specification --- diff --git a/docs/develop/python-plugins/development/index.md b/docs/develop/python-plugins/development/index.md index 0a3bd58e6..0c9c13ad7 100644 --- a/docs/develop/python-plugins/development/index.md +++ b/docs/develop/python-plugins/development/index.md @@ -32,9 +32,9 @@ A workflow plugin may accept an arbitrary list of inputs and optionally returns The lifecycle of a workflow plugin is as follows: -- The plugin will be instantiated once the workflow execution reaches the respective plugin. -- The `execute` function is called and gets the results of the ingoing operators as input. -- The output is forwarded to the next operator. +- The plugin will be instantiated once the workflow execution reaches the respective plugin. +- The `execute` function is called and gets the results of the ingoing operators as input. +- The output is forwarded to the next operator. The following depiction shows a task of the plugin **My Workflow Plugin**. The task has two connected incoming tasks and one connected outgoing task. @@ -100,13 +100,13 @@ The `PluginParameter` class can be instantiated multiple times within a `@Plugin The `PluginParameter` has several parameters that can be specified when initializing an instance: -- `name`: The name of the parameter. This is a required parameter and must be specified. -- `label`: A visible label of the parameter. This is an optional parameter and can be left blank. 
If left blank, the name of the parameter will be used as the label. -- `description`: A visible description of the parameter. This is an optional parameter and can be left blank. -- `param_type`: Optionally overrides the parameter type. Usually, this does not have to be set manually as it will be inferred from the plugin automatically. -- `default_value`: The parameter default value (optional). If not specified, it will be inferred from the plugin automatically. -- `advanced`: A boolean flag indicating whether or not this is an advanced parameter that can only be changed in the advanced section. This is an optional parameter and defaults to False. -- `visible`: A boolean flag indicating whether or not the parameter will be displayed to the user in the UI. This is an optional parameter and defaults to True. +- `name`: The name of the parameter. This is a required parameter and must be specified. +- `label`: A visible label of the parameter. This is an optional parameter and can be left blank. If left blank, the name of the parameter will be used as the label. +- `description`: A visible description of the parameter. This is an optional parameter and can be left blank. +- `param_type`: Optionally overrides the parameter type. Usually, this does not have to be set manually as it will be inferred from the plugin automatically. +- `default_value`: The parameter default value (optional). If not specified, it will be inferred from the plugin automatically. +- `advanced`: A boolean flag indicating whether or not this is an advanced parameter that can only be changed in the advanced section. This is an optional parameter and defaults to False. +- `visible`: A boolean flag indicating whether or not the parameter will be displayed to the user in the UI. This is an optional parameter and defaults to True. 
## Parameter Type @@ -278,18 +278,18 @@ The `EnumParameterType`is an example of a parameter type that uses auto-completi `autocomplete()` takes in three parameters: `query_terms`, `depend_on_parameter_values`, and `context`. It returns a list of `Autocompletion` objects, which represent the possible auto-completion results. -- The `query_terms` parameter is a list of lower case conjunctive search terms. These are the search terms that the user has entered, and the `auto-completion()` will attempt to find results that match all of them. +- The `query_terms` parameter is a list of lower case conjunctive search terms. These are the search terms that the user has entered, and the `auto-completion()` will attempt to find results that match all of them. -- The `depend_on_parameter_values` parameter is a list of values for the parameters that the `auto-completion()` depends on. These values will be used to generate the auto-completion results. The type of each parameter value is the same as in the init method, which means that if a `password` parameter is specified, the type of the parameter value will be of `Password` Type. +- The `depend_on_parameter_values` parameter is a list of values for the parameters that the `auto-completion()` depends on. These values will be used to generate the auto-completion results. The type of each parameter value is the same as in the init method, which means that if a `password` parameter is specified, the type of the parameter value will be of `Password` Type. -- The `context` parameter represents the `PluginContext` in which the auto-completion is requested. This could be, for example, the context of a specific plugin, or the context of the entire system. +- The `context` parameter represents the `PluginContext` in which the auto-completion is requested. This could be, for example, the context of a specific plugin, or the context of the entire system. 
#### Autocompletion The method returns a list of `Autocompletion` objects, which represent the possible auto-completion results. Each `Autocompletion` object has two attributes: value and label. -- The `value` attribute represents the value to which the parameter value should be set. -- The `label` attribute is an optional label that a human user would see instead. +- The `value` attribute represents the value to which the parameter value should be set. +- The `label` attribute is an optional label that a human user would see instead. !!! Note @@ -424,10 +424,10 @@ The `system` attribute is of type SystemContext and contains general system info ExecutionContext combines context objects that are available during plugin execution. It contains four attributes: -- `system`: An instance of the SystemContext, which provides general system information. -- `user`: An optional instance of the UserContext, which provides information about the user that issued the plugin execution. -- `task`: An instance of the TaskContext, which provides metadata about the executed plugin. -- `report`: An instance of the ReportContext, which allows to update the execution report. +- `system`: An instance of the SystemContext, which provides general system information. +- `user`: An optional instance of the UserContext, which provides information about the user that issued the plugin execution. +- `task`: An instance of the TaskContext, which provides metadata about the executed plugin. +- `report`: An instance of the ReportContext, which allows to update the execution report. The ExecutionContext is used to provide context information to plugins during execution, enabling plugins to access information about the environment in which they are running, the user who initiated the execution, and the task being executed. The ReportContext attribute allows plugins to generate and update reports during execution. 
@@ -666,9 +666,9 @@ This JVM-based logger will prefix all plugin logs with `plugins.python. -- :material-download-circle-outline: [Installation and Usage](installation/index.md) +- :material-download-circle-outline: [Installation and Usage](installation/index.md) --- Intended for Linked Data Experts and Deployment Engineers, this page outlines how to install and use existing python plugins. -- :material-code-json: [Development](development/index.md) +- :material-code-json: [Development](development/index.md) --- Intended for Developers, this page gives an overview on the plugin concepts and how to start developing your own plugins. -- :material-cog-outline: [Setup and Configuration](setup/index.md) +- :material-cog-outline: [Setup and Configuration](setup/index.md) --- diff --git a/docs/develop/python-plugins/installation/index.md b/docs/develop/python-plugins/installation/index.md index 538538263..15d0f16a6 100644 --- a/docs/develop/python-plugins/installation/index.md +++ b/docs/develop/python-plugins/installation/index.md @@ -31,7 +31,7 @@ cmem_plugin_graphql-GraphQLPlugin WorkflowPlugin GraphQL query You can get a list of all installed python packages: (1) { .annotate } -1. This list contains all installed packages in the python environment, not just your plugin packages. +1. This list contains all installed packages in the python environment, not just your plugin packages. ```shell-session title="List all installed python packages:" $ cmemc admin workspace python list @@ -95,4 +95,3 @@ Drag and drop it on the canvas, and connect it with ingoing and / or outgoing li A workaround for this is to install or upgrade plugins right after rebooting DataIntergration (and before you start a workflow that uses a python plugin). We currently advise against using NFS on this path directly. The problem is [known by the python community](https://github.com/pypa/pip/issues/6327) but there is no fix or workaround available yet. 
- diff --git a/docs/develop/python-plugins/setup/index.md b/docs/develop/python-plugins/setup/index.md index 8fde34e3b..f499d2fb9 100644 --- a/docs/develop/python-plugins/setup/index.md +++ b/docs/develop/python-plugins/setup/index.md @@ -9,14 +9,12 @@ tags: This section describes which backend components are needed on the Build (DataIntegration) server, in order to use python plugins. - ## Basic Configuration and Dependencies !!! info When using the official eccenca docker images, setup and basic configuration is already done. - ??? note "Build (DataIntegration) Configuration" The following Build (DataIntegration) configuration section describes how to setup and enable the Python Plugin system. @@ -47,7 +45,6 @@ This section describes which backend components are needed on the Build (DataInt } ``` - ??? note "Python Interpreter" An installation of the CPython distribution (at least version 3.3) is required. @@ -77,13 +74,13 @@ This section describes which backend components are needed on the Build (DataInt The basic setup allows for installation of packages from the [pypi.org](https://pypi.org/search/?q=%22cmem-plugin-%22) python package index, maintained by the [Python Software Foundation](https://www.python.org/psf-landing/). In order to change the remote index server, from where you can install python packages, you need to set the following environment variables in the data integration container: -- `PIP_INDEX_URL` - Base URL of the default python package index Base URL. This should point to a repository which is compliant with [PEP 503 (the simple repository API)](https://peps.python.org/pep-0503/). If this variable is not set, the [official Python Package Index](https://pypi.python.org/simple) is used. - - Example Value: `https://pypi.eccenca.com/simple` (the eccenca Python Package Index holds only published Corporate Memory Python Plugins and respective dependencies) - - Changing this value means, that you can install packages **only** from this repository. 
-- `PIP_EXTRA_INDEX_URL` - Extra URLs of package indexes to use in addition to the default package index. - - Example Value: `https://pypi.eccenca.com/simple https://example.org/simple` - - Multiple index URLs have to be given space-separated. - - Changing this values means you can install packages from the given repositories **in addition** to the main index. +- `PIP_INDEX_URL` - Base URL of the default python package index. This should point to a repository which is compliant with [PEP 503 (the simple repository API)](https://peps.python.org/pep-0503/). If this variable is not set, the [official Python Package Index](https://pypi.python.org/simple) is used. + - Example Value: `https://pypi.eccenca.com/simple` (the eccenca Python Package Index holds only published Corporate Memory Python Plugins and respective dependencies) + - Changing this value means that you can install packages **only** from this repository. +- `PIP_EXTRA_INDEX_URL` - Extra URLs of package indexes to use in addition to the default package index. + - Example Value: `https://pypi.eccenca.com/simple https://example.org/simple` + - Multiple index URLs have to be given space-separated. + - Changing these values means you can install packages from the given repositories **in addition** to the main index. For individual needs, you can use additional environment variables known by `pip` (`PIP_TRUSTED_HOST`, `PIP_CERT`, ...). Please have a look at the [pip documentation](https://pip.pypa.io/en/stable/topics/configuration/#environment-variables). @@ -93,9 +90,9 @@ Please have a look at the [pip documentation](https://pip.pypa.io/en/stable/topi In cases, where you have limited or disabled network capabilities to the internet, you can disable package retrieval and provide the packages in a local directory. To do so, you need to set the following environment variables in the data integration container: -- `PIP_NO_INDEX` - set the value as `true` to disable the package retrieval completely. 
-- `PIP_FIND_LINKS` - set to a container internal directory, where the packages and its dependencies will be provided. - - Example Value: `/data/downloaded-packages` +- `PIP_NO_INDEX` - set the value as `true` to disable the package retrieval completely. +- `PIP_FIND_LINKS` - set to a container internal directory, where the packages and its dependencies will be provided. + - Example Value: `/data/downloaded-packages` This setup will allow installation of packages and its dependencies ONLY from the given directory. diff --git a/docs/explore-and-author/charts-catalog/index.md b/docs/explore-and-author/charts-catalog/index.md index 6c4eecc17..b4bc86429 100644 --- a/docs/explore-and-author/charts-catalog/index.md +++ b/docs/explore-and-author/charts-catalog/index.md @@ -37,12 +37,12 @@ Select a query from the :eccenca-application-queries: [Query Catalog](../query-m The following activities can be performed in this component: -- **Select a query** — Select a query to visualize by clicking on a :material-plus-circle-outline: button. +- **Select a query** — Select a query to visualize by clicking on a :material-plus-circle-outline: button. The Assisted chart form (see below) supports a single query, while the Advanced chart form can use multiple queries. -- From the dropdown menu: - - **Parameters** — Some queries have parameters that need to be filled with real values. - - **Preview** — View a preview of the retrieved data. - - **View in query catalog** — Opens the query in the query catalog. +- From the dropdown menu: + - **Parameters** — Some queries have parameters that need to be filled with real values. + - **Preview** — View a preview of the retrieved data. + - **View in query catalog** — Opens the query in the query catalog. 
### Chart forms @@ -74,6 +74,6 @@ To do so, add :material-plus-circle-outline: the **Chart Visualization** propert To customize the chart several placeholders can be used in you queries: -- `{{shuiResource}}`, the resource currently shown with the node shape of this property shape. -- `{{shuiMainResource}}`, refers to the main resource rendered in the start node shape of the currently displayed node shape tree (only relevant in case of sub-shape usage). -- `{{shuiGraph}}`, the currently used graph. +- `{{shuiResource}}`, the resource currently shown with the node shape of this property shape. +- `{{shuiMainResource}}`, refers to the main resource rendered in the start node shape of the currently displayed node shape tree (only relevant in case of sub-shape usage). +- `{{shuiGraph}}`, the currently used graph. diff --git a/docs/explore-and-author/companion/index.md b/docs/explore-and-author/companion/index.md index 1646104a4..424d16f9d 100644 --- a/docs/explore-and-author/companion/index.md +++ b/docs/explore-and-author/companion/index.md @@ -32,14 +32,14 @@ You can open the _Chat with Companion_ from any explore module via the :eccenca- In the companion sidebar, use: -- :eccenca-toggler-maximize: to increase the size of the sidebar -- :eccenca-toggler-minimize: to decrease the size of the sidebar -- :eccenca-item-add-artefact: to start a new conversation -- :octicons-triangle-down-24: the dropdown menu to select or search for a conversation -- :eccenca-item-upload: to add documents as context to the chat (supported formats include `pdf`, `txt` and `md`) -- :eccenca-item-wrench: to configure the tools that the LLM can use to answer. 
-- :eccenca-item-microphone: to use your microphone to dictate your questions -- :material-send-variant-outline: to submit your question +- :eccenca-toggler-maximize: to increase the size of the sidebar +- :eccenca-toggler-minimize: to decrease the size of the sidebar +- :eccenca-item-add-artefact: to start a new conversation +- :octicons-triangle-down-24: the dropdown menu to select or search for a conversation +- :eccenca-item-upload: to add documents as context to the chat (supported formats include `pdf`, `txt` and `md`) +- :eccenca-item-wrench: to configure the tools that the LLM can use to answer. +- :eccenca-item-microphone: to use your microphone to dictate your questions +- :material-send-variant-outline: to submit your question To maximize the relevance and accuracy of the generated answer, your question is automatically enriched based on the context of the current view (e.g. the query text, the graph and the resource IRI). @@ -51,7 +51,7 @@ The companion chat uses various tools to interact with Corporate Memory. These tools are used autonomously by the configured LLM model to gather the relevant context needed to generate the best answer. If your use case or question requires you to limit the available tools (e.g. if you only want to use queries from the query catalogue and not generate any), you can configure the tools the model can use via the drop-down widget. -- Query Catalog related +- Query Catalog related `query_list` : Lists all existing SPARQL queries on this corporate memory instance. @@ -63,13 +63,13 @@ If your use case or question requires you to limit the available tools (e.g. if : Executes a saved SPARQL query on this corporate memory instance. The query is identified by its IRI. -- Query related: +- Query related: `query_execute_json` : Executes a `SPARQL SELECT` query on this corporate memory instance. The query is provided as a string. 
-- Introspection related: +- Introspection related: `graph_tree` : Retrieves a tree of all the direct and transitive `owl:imports` for a given graph. @@ -94,7 +94,7 @@ If your use case or question requires you to limit the available tools (e.g. if : All Classes defined in this knowledge graph by installed vocabularies. While all classes are returned, they can easily be grouped by either their explicit association with an ontology in the isDefinedBy (`rdfs:isDefinedBy`) field, or by the sourceGraph field, which contains the names of the graphs from which the type statements were loaded. -- Resource related +- Resource related `resource_search` : Performs a lucene syntax full text search of labels and descriptions for 'searchTerm' and retrieve list of best matches. diff --git a/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/datatype-reference/index.md b/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/datatype-reference/index.md index 367df4bfb..825dd30c9 100644 --- a/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/datatype-reference/index.md +++ b/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/datatype-reference/index.md @@ -13,298 +13,254 @@ This is a list of supported data types in shapes. Not all datatypes result in specific widgets. - #### anyURI - The ·lexical space· of anyURI is finite-length character sequences which, when the algorithm defined in Section 5.4 of [XML Linking Language] is applied to them, result in strings which are legal URIs according to [RFC 2396], as amended by [RFC 2732]. Note: Spaces are, in principle, allowed in the ·lexical space· of anyURI, however, their use is highly discouraged (unless they are encoded by %20). 
IRI: `http://www.w3.org/2001/XMLSchema#anyURI` #### base64Binary - The lexical forms of base64Binary values are limited to the 65 characters of the Base64 Alphabet defined in [RFC 2045], i.e., a-z, A-Z, 0-9, the plus sign (+), the forward slash (/) and the equal sign (=), together with the characters defined in [XML 1.0 (Second Edition)] as white space. No other characters are allowed. IRI: `http://www.w3.org/2001/XMLSchema#base64Binary` #### boolean - An instance of a datatype that is defined as ·boolean· can have the following legal literals {true, false, 1, 0}. IRI: `http://www.w3.org/2001/XMLSchema#boolean` #### byte - byte is ·derived· from short by setting the value of ·maxInclusive· to be 127 and ·minInclusive· to be -128. byte has a lexical representation consisting of an optional sign followed by a finite-length sequence of decimal digits (#x30-#x39). If the sign is omitted, '+' is assumed. For example: -1, 0, 126, +100. IRI: `http://www.w3.org/2001/XMLSchema#byte` #### date - The lexical space of date consists of finite-length sequences of characters of the form: `'-'? yyyy '-' mm '-' dd zzzzzz?` where the date and optional timezone are represented exactly the same way as they are for dateTime. The first moment of the interval is that represented by: `'-' yyyy '-' mm '-' dd 'T00:00:00' zzzzzz?` and the least upper bound of the interval is the timeline point represented (noncanonically) by: `'-' yyyy '-' mm '-' dd 'T24:00:00' zzzzzz?`. IRI: `http://www.w3.org/2001/XMLSchema#date` #### dateTime - The ·lexical space· of dateTime consists of finite-length sequences of characters of the form: `'-'? yyyy '-' mm '-' dd 'T' hh ':' mm ':' ss ('.' s+)? (zzzzzz)?` For example, `2002-10-10T12:00:00-05:00` (noon on 10 October 2002, Central Daylight Savings Time as well as Eastern Standard Time in the U.S.) is `2002-10-10T17:00:00Z`, five hours later than `2002-10-10T12:00:00Z`. 
IRI: `http://www.w3.org/2001/XMLSchema#dateTime` #### dateTimeStamp - -The lexical space of dateTimeStamp consists of strings which are in the ·lexical space· of dateTime and which also match the regular expression '.*(Z|(+|-)[0-9][0-9]:[0-9][0-9])' +The lexical space of dateTimeStamp consists of strings which are in the ·lexical space· of dateTime and which also match the regular expression '.*(Z|(\+|-)[0-9][0-9]:[0-9][0-9])' IRI: `http://www.w3.org/2001/XMLSchema#dateTimeStamp` #### decimal - decimal has a lexical representation consisting of a finite-length sequence of decimal digits (#x30-#x39) separated by a period as a decimal indicator. An optional leading sign is allowed. If the sign is omitted, '+' is assumed. Leading and trailing zeroes are optional. If the fractional part is zero, the period and following zeroes can be omitted. For example: -1.23, 12678967.543233, +100000.00, 210. IRI: `http://www.w3.org/2001/XMLSchema#decimal` #### double - double values have a lexical representation consisting of a mantissa followed, optionally, by the character 'E' or 'e', followed by an exponent. The exponent ·must· be an integer. The mantissa must be a decimal number. The representations for exponent and mantissa must follow the lexical rules for integer and decimal. If the 'E' or 'e' and the following exponent are omitted, an exponent value of 0 is assumed. The special values positive and negative infinity and not-a-number have lexical representations INF, -INF and NaN, respectively. Lexical representations for zero may take a positive or negative sign. For example, -1E4, 1267.43233E12, 12.78e-2, 12 , -0, 0 and INF are all legal literals for double. 
IRI: `http://www.w3.org/2001/XMLSchema#double` #### duration - The lexical representation for duration is the ISO 8601 extended format `PnYnMnDTnHnMnS`, where `nY` represents the number of years, `nM` the number of months, `nD` the number of days, `T` is the date/time separator, `nH` the number of hours, `nM` the number of minutes and `nS` the number of seconds. The number of seconds can include decimal digits to arbitrary precision. IRI: `http://www.w3.org/2001/XMLSchema#duration` #### float - float values have a lexical representation consisting of a mantissa followed, optionally, by the character 'E' or 'e', followed by an exponent. The exponent ·must· be an integer. The mantissa must be a decimal number. The representations for exponent and mantissa must follow the lexical rules for integer and decimal. If the 'E' or 'e' and the following exponent are omitted, an exponent value of 0 is assumed. The special values positive and negative infinity and not-a-number have lexical representations INF, -INF and NaN, respectively. Lexical representations for zero may take a positive or negative sign. For example, -1E4, 1267.43233E12, 12.78e-2, 12 , -0, 0 and INF are all legal literals for float. IRI: `http://www.w3.org/2001/XMLSchema#float` #### gDay - The lexical representation for gDay is the left truncated lexical representation for date: `---DD` . An optional following time zone qualifier is allowed as for date. No preceding sign is allowed. No other formats are allowed. See also ISO 8601 Date and Time Formats. IRI: `http://www.w3.org/2001/XMLSchema#gDay` #### gMonth - The lexical representation for gMonth is the left and right truncated lexical representation for date: `--MM`. An optional following time zone qualifier is allowed as for date. No preceding sign is allowed. No other formats are allowed. See also ISO 8601 Date and Time Formats. 
IRI: `http://www.w3.org/2001/XMLSchema#gMonth` #### gMonthDay - The lexical representation for gMonthDay is the left truncated lexical representation for date: `--MM-DD`. An optional following time zone qualifier is allowed as for date. No preceding sign is allowed. No other formats are allowed. See also ISO 8601 Date and Time Formats. This datatype can be used to represent a specific day in a month. To say, for example, that my birthday occurs on the 14th of September ever year. IRI: `http://www.w3.org/2001/XMLSchema#gMonthDay` #### gYear - The lexical representation for gYear is the reduced (right truncated) lexical representation for dateTime: `CCYY`. No left truncation is allowed. An optional following time zone qualifier is allowed as for dateTime. To accommodate year values outside the range from `0001` to `9999`, additional digits can be added to the left of this representation and a preceding `-` sign is allowed. For example, to indicate 1999, one would write: `1999`. See also ISO 8601 Date and Time Formats. IRI: `http://www.w3.org/2001/XMLSchema#gYear` #### gYearMonth - The lexical representation for gYearMonth is the reduced (right truncated) lexical representation for dateTime: CCYY-MM. No left truncation is allowed. An optional following time zone qualifier is allowed. To accommodate year values outside the range from 0001 to 9999, additional digits can be added to the left of this representation and a preceding '-' sign is allowed. For example, to indicate the month of May 1999, one would write: 1999-05. See also ISO 8601 Date and Time Formats (·D). IRI: `http://www.w3.org/2001/XMLSchema#gYearMonth` #### hexBinary - hexBinary has a lexical representation where each binary octet is encoded as a character tuple, consisting of two hexadecimal digits ([0-9a-fA-F]) representing the octet code. For example, '0FB7' is a hex encoding for the 16-bit integer 4023 (whose binary representation is 111110110111). 
IRI: `http://www.w3.org/2001/XMLSchema#hexBinary` #### HTML - The datatype of RDF literals storing fragments of HTML content IRI: `http://www.w3.org/1999/02/22-rdf-syntax-ns#HTML` #### int - int is ·derived· from long by setting the value of ·maxInclusive· to be 2147483647 and ·minInclusive· to be -2147483648. int has a lexical representation consisting of an optional sign followed by a finite-length sequence of decimal digits (#x30-#x39). If the sign is omitted, '+' is assumed. For example: -1, 0, 126789675, +100000. IRI: `http://www.w3.org/2001/XMLSchema#int` #### integer - integer has a lexical representation consisting of a finite-length sequence of decimal digits (#x30-#x39) with an optional leading sign. If the sign is omitted, '+' is assumed. For example: -1, 0, 12678967543233, +100000. IRI: `http://www.w3.org/2001/XMLSchema#integer` #### Jinja Template String - Jinja is a modern and designer-friendly templating language for Python and other languages. IRI: `https://vocab.eccenca.com/shui/jinja` #### langString - The datatype of language-tagged string values IRI: `http://www.w3.org/1999/02/22-rdf-syntax-ns#langString` #### language - language represents natural language identifiers as defined by by [RFC 3066] . The ·value space· of language is the set of all strings that are valid language identifiers as defined [RFC 3066] . The ·lexical space· of language is the set of all strings that conform to the pattern [a-zA-Z]{1,8}(-[a-zA-Z0-9]{1,8})* . The ·base type· of language is token. IRI: `http://www.w3.org/2001/XMLSchema#language` #### long - long is ·derived· from integer by setting the value of ·maxInclusive· to be 9223372036854775807 and ·minInclusive· to be -9223372036854775808. long has a lexical representation consisting of an optional sign followed by a finite-length sequence of decimal digits (#x30-#x39). If the sign is omitted, '+' is assumed. For example: -1, 0, 12678967543233, +100000. 
IRI: `http://www.w3.org/2001/XMLSchema#long` #### Markdown - In addition to rdf:HTML, this is the datatype of RDF literals storing fragments of markdown content. eccenca Corporate Memory user interfaces support the rendering of all basic Markdown syntax features as well as the extensions for tables, code blocks, strikethrough, task lists and footnotes. IRI: `http://ns.ontowiki.net/SysOnt/Markdown` #### Name - Name represents XML Names. The ·value space· of Name is the set of all strings which ·match· the Name production of [XML 1.0 (Second Edition)]. The ·lexical space· of Name is the set of all strings which ·match· the Name production of [XML 1.0 (Second Edition)]. The ·base type· of Name is token. IRI: `http://www.w3.org/2001/XMLSchema#Name` #### NCName - NCName represents XML 'non-colonized' Names. The ·value space· of NCName is the set of all strings which ·match· the NCName production of [Namespaces in XML]. The ·lexical space· of NCName is the set of all strings which ·match· the NCName production of [Namespaces in XML]. The ·base type· of NCName is Name. IRI: `http://www.w3.org/2001/XMLSchema#NCName` #### negativeInteger - negativeInteger has a lexical representation consisting of a negative sign ('-') followed by a finite-length sequence of decimal digits (#x30-#x39). For example: -1, -12678967543233, -100000. IRI: `http://www.w3.org/2001/XMLSchema#negativeInteger` #### NMTOKEN - NMTOKEN represents the NMTOKEN attribute type from [XML 1.0 (Second Edition)]. The ·value space· of NMTOKEN is the set of tokens that ·match· the Nmtoken production in [XML 1.0 (Second Edition)]. The ·lexical space· of NMTOKEN is the set of strings that ·match· the Nmtoken production in [XML 1.0 (Second Edition)]. The ·base type· of NMTOKEN is token. IRI: `http://www.w3.org/2001/XMLSchema#NMTOKEN` #### nonNegativeInteger - nonNegativeInteger has a lexical representation consisting of an optional sign followed by a finite-length sequence of decimal digits (#x30-#x39). 
If the sign is omitted, the positive sign ('+') is assumed. If the sign is present, it must be '+' except for lexical forms denoting zero, which may be preceded by a positive ('+') or a negative ('-') sign. For example: 1, 0, 12678967543233, +100000. IRI: `http://www.w3.org/2001/XMLSchema#nonNegativeInteger` #### nonPositiveInteger - nonPositiveInteger has a lexical representation consisting of an optional preceding sign followed by a finite-length sequence of decimal digits (#x30-#x39). The sign may be '+' or may be omitted only for lexical forms denoting zero, in all other lexical forms, the negative sign ('-') must be present. For example: -1, 0, -12678967543233, -100000. IRI: `http://www.w3.org/2001/XMLSchema#nonPositiveInteger` #### normalizedString - normalizedString represents white space normalized strings. The ·value space· of normalizedString is the set of strings that do not contain the carriage return (#xD), line feed (#xA) nor tab (#x9) characters. The ·lexical space· of normalizedString is the set of strings that do not contain the carriage return (#xD), line feed (#xA) nor tab (#x9) characters. The ·base type· of normalizedString is string. IRI: `http://www.w3.org/2001/XMLSchema#normalizedString` #### positiveInteger - positiveInteger has a lexical representation consisting of an optional positive sign ('+') followed by a finite-length sequence of decimal digits (#x30-#x39). For example: 1, 12678967543233, +100000. IRI: `http://www.w3.org/2001/XMLSchema#positiveInteger` #### short - short is ·derived· from int by setting the value of ·maxInclusive· to be 32767 and ·minInclusive· to be -32768. short has a lexical representation consisting of an optional sign followed by a finite-length sequence of decimal digits (#x30-#x39). If the sign is omitted, '+' is assumed. For example: -1, 0, 12678, +10000. IRI: `http://www.w3.org/2001/XMLSchema#short` #### string - The string datatype represents character strings in XML. 
The ·value space· of string is the set of finite-length sequences of characters (as defined in [XML 1.0 (Second Edition)]) that ·match· the Char production from [XML 1.0 (Second Edition)]. A character is an atomic unit of communication, it is not further specified except to note that every character has a corresponding Universal Character Set code point, which is an integer. IRI: `http://www.w3.org/2001/XMLSchema#string` #### time - The lexical representation for time is the left truncated lexical representation for dateTime: `hh:mm:ss.sss` with optional following time zone indicator. For example, to indicate 1:20 pm for Eastern Standard Time which is 5 hours behind Coordinated Universal Time (UTC), one would write: `13:20:00-05:00`. See also ISO 8601 Date and Time Formats. IRI: `http://www.w3.org/2001/XMLSchema#time` #### token - token represents tokenized strings. The ·value space· of token is the set of strings that do not contain the carriage return (#xD), line feed (#xA) nor tab (#x9) characters, that have no leading or trailing spaces (#x20) and that have no internal sequences of two or more spaces. The ·lexical space· of token is the set of strings that do not contain the carriage return (#xD), line feed (#xA) nor tab (#x9) characters, that have no leading or trailing spaces (#x20) and that have no internal sequences of two or more spaces. The ·base type· of token is normalizedString. IRI: `http://www.w3.org/2001/XMLSchema#token` #### unsignedByte - unsignedByte is ·derived· from unsignedShort by setting the value of ·maxInclusive· to be 255. unsignedByte has a lexical representation consisting of a finite-length sequence of decimal digits (#x30-#x39). For example: 0, 126, 100. IRI: `http://www.w3.org/2001/XMLSchema#unsignedByte` #### unsignedInt - unsignedInt is ·derived· from unsignedLong by setting the value of ·maxInclusive· to be 4294967295. unsignedInt has a lexical representation consisting of a finite-length sequence of decimal digits (#x30-#x39). 
For example: 0, 1267896754, 100000. IRI: `http://www.w3.org/2001/XMLSchema#unsignedInt` #### unsignedLong - unsignedLong is ·derived· from nonNegativeInteger by setting the value of ·maxInclusive· to be 18446744073709551615. unsignedLong has a lexical representation consisting of a finite-length sequence of decimal digits (#x30-#x39). For example: 0, 12678967543233, 100000. IRI: `http://www.w3.org/2001/XMLSchema#unsignedLong` #### unsignedShort - unsignedShort is ·derived· from unsignedInt by setting the value of ·maxInclusive· to be 65535. unsignedShort has a lexical representation consisting of a finite-length sequence of decimal digits (#x30-#x39). For example: 0, 12678, 10000. IRI: `http://www.w3.org/2001/XMLSchema#unsignedShort` #### XMLLiteral - The datatype of XML literal values. IRI: `http://www.w3.org/1999/02/22-rdf-syntax-ns#XMLLiteral` - diff --git a/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/node-shapes/index.md b/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/node-shapes/index.md index e813f64d3..5592eeaf3 100644 --- a/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/node-shapes/index.md +++ b/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/node-shapes/index.md @@ -14,7 +14,6 @@ They can be used to validate resources as well as to define custom forms for pre This page lists all supported properties to describe node shapes. - ## Naming and Presentation !!! info @@ -22,59 +21,45 @@ This page lists all supported properties to describe node shapes. ### Name - The name of the node is presented to the user only when he needs to distinguish between different shapes for the same resource. Used Path: `shacl:name` - ### Description - The node description should provide context information for the user when creating a new resource based on this node. 
Used Path: `rdfs:comment` - ### Navigation list query - This property links the node shape to a SPARQL 1.1 Query in order to provide a sophisticated user navigation list query e.g. to add specific additional columns. The query should use {{FROM}} as a placeholder for the FROM section. Used Path: `shui:navigationListQuery` - ### Depiction Image - This property links a node shape to an image in order to use this image when showing resources based on this node shape somewhere. Used Path: `http://xmlns.com/foaf/0.1/depiction` - ### Order - Specifies the order of this node shape. This property is used for the drop-down list in the shaped resource view as well as for prioritising depictions and update queries. It is only relevant in case multiple node shapes are on the same level in the shape hierarchy (which is based on the rdfs:subClassOf relationship). - Used Path: `shacl:order` - ### Chart Visualization - Integrates a chart visualization in the node shape area. This Property is deprecated - charts on node shape level are not supported anymore. Used Path: `shui:provideChartVisualization` - ### Widgets - Integrate non-validating visualization widget in the node shape area. Used Path: `shui:WidgetIntegration_integrate` @@ -86,15 +71,12 @@ Used Path: `shui:WidgetIntegration_integrate` ### Property Shapes - The used property shapes on this node. Please note that this is NOT a link to a datatype or object property but to a SHACL property shape. Used Path: `shacl:property` - ### Target class - Class this NodeShape applies to. This is a direct link to a class resource from a vocabulary. Used Path: `shacl:targetClass` @@ -106,39 +88,30 @@ Used Path: `shacl:targetClass` ### Severity - Categorize validation results (:Info, :Warning, :Violation). Defaults to :Violation. Used Path: `shacl:severity` - ### SPARQL Constraints - Add additional SPARQL based validation to your Node Shape. 
Used Path: `shacl:sparql` - ### URI template - The URI template which is used, when a user manually creates new resources with this Node Shape. Used Path: `shui:uriTemplate` - ### Closed Node - Enabling this will result in failing validation if the resource / node has properties which are NOT described with attached property shapes. Used Path: `shacl:closed` - ### Query: On delete update - A query which is executed when the resource the node shape applies to gets deleted. The following placeholder can be used in the query text of the SPARQL query: @@ -147,15 +120,11 @@ The following placeholder can be used in the query text of the SPARQL query: - `{{shuiAccount}}` - the account IRI of the active user, this includes the username (use a SUBSTR() function if you need the name only) - `{{shuiAccountName}}` - the user name/ID of the active user account - `{{shuiMainResource}}` - refers to the main resource rendered in the start node shape of the currently displayed node shape tree (only relevant in case of sub-shape usage) - Used Path: `shui:onDeleteUpdate` - ### Query: On update update - - A query which is executed when this node shape is submitted. The query should be saved in the same graph as the shape (or imported). @@ -167,21 +136,16 @@ The query can use these placeholders: - `{{shuiAccountName}}` - the user name/ID of the active user account - `{{shuiMainResource}}` - refers to the main resource rendered in the start node shape of the currently displayed node shape tree (only relevant in case of sub-shape usage) - Used Path: `shui:onUpdateUpdate` - ### Target Graph Template - Graph templates can be used to enforce writing statement in specific graphs rather than into the selected graph. Graph templates can be added to node and property shapes. A template on a property shape is used only for overwriting a template on a node shape (without a node shape graph template, they do not have an effect). 
Used Path: `shui:targetGraphTemplate` - ### Query: Is Creatable Resource - This query is executed to check if users get the controls to create resources (described with the node shape). The query needs to be an ASK query and can include the following placeholders, which will be substituted before execution: @@ -190,13 +154,10 @@ The query needs to be an ASK query and can include the following placeholders, w - `{{shuiAccount}}` - the IRI of the active user account - `{{shuiAccountName}}` - the user name/ID of the active user account - Used Path: `shui:askIfCreatableQuery` - ### Query: Is Removable Resource - This query is executed to check if users get the controls to remove resources (described with the node shape). The query needs to be an ASK query and can include the following placeholders, which will be substituted before execution: @@ -207,14 +168,10 @@ The query needs to be an ASK query and can include the following placeholders, w - `{{shuiAccountName}}` - the user name/ID of the active user account - `{{shuiMainResource}}` - refers to the main resource rendered in the start node shape of the currently displayed node shape tree (only relevant in case of sub-shape usage) - - Used Path: `shui:askIfRemovableQuery` - ### Query: Is Cloneable Resource - This query is executed to check if users get the controls to clone resources (described with the node shape). 
The query needs to be an ASK query and can include the following placeholders, which will be substituted before execution: @@ -225,19 +182,13 @@ The query needs to be an ASK query and can include the following placeholders, w - `{{shuiAccountName}}` - the user name/ID of the active user account - `{{shuiMainResource}}` - refers to the main resource rendered in the start node shape of the currently displayed node shape tree (only relevant in case of sub-shape usage) - - Used Path: `shui:askIfCloneableQuery` - ### On update trigger workflow - - A workflow trigger which is executed when this nodeshape is submitted. The workflow(s) run instantaneously upon submitting the form. - Used Path: `shui:onUpdateTriggerWorkflow` ## Statement Annotation @@ -247,15 +198,12 @@ Used Path: `shui:onUpdateTriggerWorkflow` ### Enable - A value of true enables visualisation and management capabilities of statement annotations (reification) for all statements which are shown via this shape. Used Path: `shui:enableStatementLevelMetadata` - ### Provide as Shape - A value of true enables this node shape to be applied as statement annotation (reification). Used Path: `shui:isApplicableAsStatementLevelMetadata` diff --git a/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/property-shapes/index.md b/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/property-shapes/index.md index ae0bf6879..fb06507e9 100644 --- a/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/property-shapes/index.md +++ b/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/property-shapes/index.md @@ -12,7 +12,6 @@ They are used to specify constraints and UI options that need to be met in the The following Property Shape properties are supported: - ## Naming and Presentation !!! info @@ -21,68 +20,52 @@ The following Property Shape properties are supported: ### Name - This name will be shown to the user. 
Used Path: `shacl:name` - ### Description - This text will be shown to the user in a tooltip. You can use new and blank lines for basic text structuring. Used Path: `shacl:description` - ### Order - Specifies the order of the property in the UI. Ordering is separate for each group. Used Path: `shacl:order` - ### Group - Group to which the property belongs to. Used Path: `shacl:group` - ### Show always - Default is false. A value of true let optional properties (min count = 0) show up by default. Used Path: `shui:showAlways` - ### Read only - Default is false. A value of true means the properties are not editable by the user. Useful for displaying system properties. Used Path: `shui:readOnly` - ### Chart Visualization (deprecated) - Integrates a chart visualization in the property shape area. Shapes with an integrated chart are ALWAYS shown in read mode and NEVER shown in edit mode. This Property is deprecated - please use a Widget Integration instead. Used Path: `shui:provideChartVisualization` - ### Provide Workflow Trigger (deprecated) - Integrates a workflow trigger button in order to execute workflows from or with this resource. Shapes with an integrated workflow trigger are ALWAYS shown in read mode and NEVER shown in edit mode. This property is deprecated - use a Widget Integration instead. - Used Path: `shui:provideWorkflowTrigger` ## Vocabulary @@ -93,23 +76,18 @@ Used Path: `shui:provideWorkflowTrigger` ### Property of - The node shape this property shape belongs to. Used Path: `shacl:property` - ### Path - The datatype or object property used in this shape. This path will be ignored if there is a table report defined for the property shape. However, in the Business Knowledge Editor, this path can always be used for exploration. Used Path: `shacl:path` - ### Query: Path Builder - Use this property to define a dynamic set of target resources or literals using a custom SPARQL query. 
This allows for advanced selection logic, including filtering, transitive relationships, or specific path sequences as well as the definition of a result set order. The values bound to the first variable in the query's projection will be treated as directly connected via the `sh:path` specified in this property shape. This enables simple backward-chaining inference. Note that the inferred connections are not (yet) included in SHACL validation. @@ -122,33 +100,26 @@ The following placeholder can be used in the query text of the SPARQL query: - `{{shuiAccountName}}` - the user name/ID of the active user account - `{{shuiMainResource}}` - refers to the main resource rendered in the start node shape of the currently displayed node shape tree (only relevant in case of sub-shape usage) -Note that for a proper usage of this feature, you additionally need to have the projection variables `?graph` or `?_graph` (the graph IRI where the relation statement is saved resp. in the context of which it was inferred) in your query. +Note that for a proper usage of this feature, you additionally need to have the projection variables `?graph` or `?_graph` (the graph IRI where the relation statement is saved resp. in the context of which it was inferred) in your query. If the connected property value is a resource, the variables in the projection of this query will be used to populate the columns in the complex widget and the advanced editor. - Used Path: `shui:valueQuery` - ### Node kind - The type of the linked nodes. In the Business Knowledge Editor, if these nodes are literals, they cannot be explored, but will be shown as metadata. Used Path: `shacl:nodeKind` - ### Min count - Min cardinality, 0 will show this property under optionals unless 'Show always = true' Used Path: `shacl:minCount` - ### Max count - Max cardinality Used Path: `shacl:maxCount` @@ -161,47 +132,36 @@ Used Path: `shacl:maxCount` ### Datatype - The datatype of the property. 
Used Path: `shacl:datatype` - ### Use textarea - Default is false. A value of true enables multiline editing capabilities for Literals via a `textarea` widget. Used Path: `shui:textarea` - ### Regex Pattern - An XPath regular expression (Perl like) that all literal strings need to match. Used Path: `shacl:pattern` - ### Regex Flags - An optional string of flags for the regular expression pattern (e.g. 'i' for case-insensitive mode) Used Path: `shacl:flags` - ### Languages allowed - This limits the given Literals to a list of languages. This property works only in combination with the datatype `rdf:langString`. Note that the expression for this property only allows for '2 Char ISO-639-1-Codes' (no sub-tags). Used Path: `shui:languageIn` - ### Languages Unique - Default is false. A value of true enforces that no pair of Literals may use the same language tag. Used Path: `shacl:uniqueLang` @@ -214,31 +174,24 @@ Used Path: `shacl:uniqueLang` ### Class - Class of the connected IRI if its nodeKind is sh:IRI. In the Business Knowledge Editor, any new node that a user creates by means of this property shape, will be an instance of this class. Used Path: `shacl:class` - ### Used Class for Resource Creation - Use this property to overrule which class is used when a user creates a new resource inside of this property shape on-the-fly. Used Path: `shui:usedClassForResourceCreation` - ### Disable default sorting of values - Per default, related resources are shown ordered by its IRI. This ordering can have a performance impact with large lists. Setting this property to true will disable this default behaviour. In case a path builder query with an order statement is used, this property has no effect. Used Path: `shui:disableDefaultValueSorting` - ### Query: Selectable Resources - This query allows for listing selectable resources in the dropdown list for this property shape. You need to provide the projection variable `resource` in your query. 
@@ -255,34 +208,26 @@ Beta Feature: This query will be used as well to populate the selectable resourc Used Path: `shui:uiQuery` - ### Inverse Path - Default is false. A value of true inverts the expected / created direction of a relation. Used Path: `shui:inversePath` - ### Deny new resources - A value of true disables the option to create new resources. Used Path: `shui:denyNewResources` - ### Resource viewer widget - Selects default object relation resource viewer widget. (NOTE: shacl2 only) Used Path: `shui:viewResourcesWithWidget` - ### Node shape - This shape will be used to create an embedded view of the linked resource. Used Path: `shacl:node` @@ -295,31 +240,24 @@ Used Path: `shacl:node` ### Message - If there is a message value, then all validation results produced as a result of this shape will have exactly this message. Used Path: `shacl:message` - ### Severity - Categorize validation results (:Info, :Warning, :Violation). Defaults to :Violation. Used Path: `shacl:severity` - ### Ignore on clone - Disables reusing the value(s) when creating a clone of the resource. Note: This feature was named 'copy' before. Used Path: `shui:ignoreOnCopy` - ### Query: On insert update - This query is executed when a property value is added or changed. The following placeholder can be used in the query text of the SPARQL query: @@ -331,14 +269,11 @@ The following placeholder can be used in the query text of the SPARQL query: - `{{shuiMainResource}}` - refers to the main resource rendered in the start node shape of the currently displayed node shape tree (only relevant in case of sub-shape usage) - `{{shuiObject}}` - the object value of the statement matched by the property shape - `{{shuiProperty}}` - the IRI of the property of the statement matched by the property shape - Used Path: `shui:onInsertUpdate` - ### Query: On delete update - A query which is executed when the statement the property shape applies to gets deleted. 
The following placeholder can be used in the query text of the SPARQL query: @@ -350,14 +285,11 @@ The following placeholder can be used in the query text of the SPARQL query: - `{{shuiMainResource}}` - refers to the main resource rendered in the start node shape of the currently displayed node shape tree (only relevant in case of sub-shape usage) - `{{shuiObject}}` - the object value of the statement matched by the property shape - `{{shuiProperty}}` - the IRI of the property of the statement matched by the property shape - Used Path: `shui:onDeleteUpdate` - ### Target Graph Template - Graph templates can be used to enforce writing statement in specific graphs rather than into the selected graph. Graph templates can be added to node and property shapes. A template on a property shape is used only for overwriting a template on a node shape (without a node shape graph template, they do not have an effect). Used Path: `shui:targetGraphTemplate` @@ -370,15 +302,12 @@ Used Path: `shui:targetGraphTemplate` ### Enable - A value of true enables visualisation and management capabilities of statement annotations (reification) for all statements which are shown via this shape. Used Path: `shui:enableStatementLevelMetadata` - ### Provided Shapes - Instead of providing all possible statement annotation node shapes for the creation of new statement annotations, this property will limit the list to the selected shapes only. 
Used Path: `shui:provideStatementLevelMetadataShapes` diff --git a/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/workflow-trigger/index.md b/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/workflow-trigger/index.md index 11af58202..bb2605f56 100644 --- a/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/workflow-trigger/index.md +++ b/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/workflow-trigger/index.md @@ -22,11 +22,11 @@ A workflow trigger resource references a data integration workflow by URI. To define a workflow trigger the following information is needed: -- **Label**: The trigger resource needs a label (can be given in different languages), which is used for the button presentation. -- **Description**: The trigger resource needs a description, which is used as text that is sitting left of the button for further documentation of the activity to the user. -- **Workflow**: the workflow parameter defines the workflow that shall be executed upon clicking the button. The workflow can be selected from a dropdown list. -- **Refresh View**: can be either `true `or `false`*.* If this value is set to `true`, the view that contains the workflow trigger will be reloaded upon workflow completion -- **Send Resource Reference**: can be either `true `or `false`. If this value is set to `true`, a payload that consists of the *resource IRI* that is represented in the view as well as the *graph IRI *of the graph that is currently selected*.* +- **Label**: The trigger resource needs a label (can be given in different languages), which is used for the button presentation. +- **Description**: The trigger resource needs a description, which is used as text that is sitting left of the button for further documentation of the activity to the user. +- **Workflow**: the workflow parameter defines the workflow that shall be executed upon clicking the button. 
The workflow can be selected from a dropdown list. +- **Refresh View**: can be either `true` or `false`. If this value is set to `true`, the view that contains the workflow trigger will be reloaded upon workflow completion +- **Send Resource Reference**: can be either `true` or `false`. If this value is set to `true`, a payload that consists of the *resource IRI* that is represented in the view as well as the *graph IRI* of the graph that is currently selected. ## Integration @@ -49,5 +49,4 @@ Workflow Payload ``` - `graphIRI` is the IRI of the graph that is currently viewed, and -- `resourceIRI `is the IRI of the resource that is viewed. - +- `resourceIRI` is the IRI of the resource that is viewed. diff --git a/docs/explore-and-author/graph-exploration/index.md b/docs/explore-and-author/graph-exploration/index.md index 2911809bd..cc50cd166 100644 --- a/docs/explore-and-author/graph-exploration/index.md +++ b/docs/explore-and-author/graph-exploration/index.md @@ -45,10 +45,10 @@ In the main area, the Metadata view of the selected graph appears, showing sev The Graphs are categorized into groups as follows: -- User: All graphs which represent user data (created manually or by build processes). -- Vocabularies: All graphs containing vocabularies. -- System: All graphs containing configuration data. -- All +- User: All graphs which represent user data (created manually or by build processes). +- Vocabularies: All graphs containing vocabularies. +- System: All graphs containing configuration data. +- All You can search for a specific graph with **:eccenca-module-search: Search**. @@ -71,9 +71,9 @@ To add a new graph to the Graphs list: To download a graph from the Graphs list: -- In the **Graphs** list, click **:eccenca-item-download: Download graph** on the graph you want to download. -- A message box appears, stating that downloading can take a long time. -- Click **Download**. 
+- In the **Graphs** list, click **:eccenca-item-download: Download graph** on the graph you want to download. +- A message box appears, stating that downloading can take a long time. +- Click **Download**. ### :eccenca-item-edit: Managing a graph @@ -193,7 +193,7 @@ This tab shows all resources that link back to the selected resource. This tab shows the turtle RDF representation of the raw data representing the resource. You can use this tab to edit the selected resource: -- Enter your changes in turtle. -- Click **UPDATE** to save your changes. +- Enter your changes in turtle. +- Click **UPDATE** to save your changes. Deleting the entire turtle representation deletes the resource. diff --git a/docs/explore-and-author/graph-exploration/statement-annotations/index.md b/docs/explore-and-author/graph-exploration/statement-annotations/index.md index 0fb958efe..cfb39d898 100644 --- a/docs/explore-and-author/graph-exploration/statement-annotations/index.md +++ b/docs/explore-and-author/graph-exploration/statement-annotations/index.md @@ -10,9 +10,9 @@ tags: Statement Annotations provide a way to express knowledge about statements. Typical use cases for Statement Annotations include: -- the temporal validity of information, -- the origin of information, or -- just a way to annotate a specific statement with a human readable comment. +- the temporal validity of information, +- the origin of information, or +- just a way to annotate a specific statement with a human readable comment. ## Usage @@ -22,9 +22,9 @@ If enabled on a specific type of statement or type of resource, you see a Statem This bubble has different status: -- A **empty text bubble** indicates, that there is no annotation on the statement, but the annotation feature is enabled for this statement. -- A **filled text bubble** indicates, that there is at least one annotation on the statement. -- **No bubble** indicates, that the annotation feature is NOT enabled on this type of statement. 
+- A **empty text bubble** indicates, that there is no annotation on the statement, but the annotation feature is enabled for this statement. +- A **filled text bubble** indicates, that there is at least one annotation on the statement. +- **No bubble** indicates, that the annotation feature is NOT enabled on this type of statement. Clicking on one of the text bubbles opens the Statement Annotation dialog for this specific statement: @@ -102,4 +102,3 @@ WHERE { } } ``` - diff --git a/docs/explore-and-author/graph-exploration/versioning-of-graph-changes/index.md b/docs/explore-and-author/graph-exploration/versioning-of-graph-changes/index.md index 521afec9f..ddd64b392 100644 --- a/docs/explore-and-author/graph-exploration/versioning-of-graph-changes/index.md +++ b/docs/explore-and-author/graph-exploration/versioning-of-graph-changes/index.md @@ -43,4 +43,3 @@ For each editing activity (→ Save a Form), a ChangeSet resource will be create This resource has some metadata (user, timestamp, label) as well as links to added and deleted Statements (using RDF Reification). The details of the used vocabulary are available at the [Changeset Vocabulary](https://vocab.org/changeset/) page. - diff --git a/docs/explore-and-author/index.md b/docs/explore-and-author/index.md index b7f9d91b5..47989f4c5 100644 --- a/docs/explore-and-author/index.md +++ b/docs/explore-and-author/index.md @@ -18,29 +18,28 @@ You will also learn how we make use of [SHACL Shapes](https://www.w3.org/TR/sha
-- :eccenca-application-explore: [Graph Exploration](graph-exploration/index.md) +- :eccenca-application-explore: [Graph Exploration](graph-exploration/index.md) --- This module provides a generic and extensible RDF data browser and editor. -- :eccenca-application-vocabularies: [Vocabulary Catalog](vocabulary-catalog/index.md) +- :eccenca-application-vocabularies: [Vocabulary Catalog](vocabulary-catalog/index.md) --- This module allows for managing vocabularies in Corporate Memory that are accessible for the user. -- :eccenca-module-thesauri: [Thesauri Management](thesauri-management/index.md) +- :eccenca-module-thesauri: [Thesauri Management](thesauri-management/index.md) --- The Thesaurus module provides a user interface to create, browse and edit thesaurus resources and general taxonomical data modeled in SKOS. -- :eccenca-application-queries: [Query Module](query-module/index.md) +- :eccenca-application-queries: [Query Module](query-module/index.md) --- The Query module provides a user interface to store, describe, search and edit SPARQL queries.
- diff --git a/docs/explore-and-author/link-rules/index.md b/docs/explore-and-author/link-rules/index.md index 77b340e5a..9c5f53f67 100644 --- a/docs/explore-and-author/link-rules/index.md +++ b/docs/explore-and-author/link-rules/index.md @@ -37,12 +37,12 @@ Click the :eccenca-toggler-showmore: toggler icon (④) next to a rule to expand In the Details tab (⑦), you’ll find: -- General Information (operators, paths) -- Result Statistics -- Rule Status -- Publication Information -- Activities -- The Custom tab contains any additional user-defined properties. +- General Information (operators, paths) +- Result Statistics +- Rule Status +- Publication Information +- Activities +- The Custom tab contains any additional user-defined properties. In the rule execution panel (⑥), you can **:eccenca-item-start: Execute** the rule, **:eccenca-item-remove: Delete** the rule, **:fontawesome-regular-clone: Clone** the rule, or **:eccenca-item-edit: Modify** its configuration. @@ -107,17 +107,17 @@ This access conditions is used by the Link Rules module to grant write access to In order to enable users to use the Link Rules Module, the following must be allowed (this can be granted in this rule or be defined separately): -- **Allow reading graph** - - Respective data graphs -- **Allow writing graph** - - *CMEM DI Project Config* graph of the "Link Rules" project - - `{IRI of the Link Rules project}-meta` (e.g., `http://di.eccenca.com/project/linkrules-meta` according to the project identifier in this example) -- **Allowed action** - - *Build - Workspace* - - *Explore - Knowledge Graph Exploration* - - *Explore - Link Rules Module* -- **Graph pattern for granting write access** - - Pattern according to the *Output Graph Template*, e.g. 
`http://eccenca.com/user_rules/result_*` +- **Allow reading graph** + - Respective data graphs +- **Allow writing graph** + - *CMEM DI Project Config* graph of the "Link Rules" project + - `{IRI of the Link Rules project}-meta` (e.g., `http://di.eccenca.com/project/linkrules-meta` according to the project identifier in this example) +- **Allowed action** + - *Build - Workspace* + - *Explore - Knowledge Graph Exploration* + - *Explore - Link Rules Module* +- **Graph pattern for granting write access** + - Pattern according to the *Output Graph Template*, e.g. `http://eccenca.com/user_rules/result_*` ### Workspace configuration @@ -125,18 +125,18 @@ Enable the *Link Rules* module. In the following let us walk through the configuration options. -- **Order**: The position of the Link Rules module in the menu. -- **Template Catalog IRI**: The IRI of the template catalog we just created. As the name implies, the Template Catalog holds all templates for usage in the Link Rules module. Templates allow preselecting patterns for easier rule creation. (`https://ns.eccenca.com/data/linkruletemplates/` in this example) -- **Project ID**: The *Project Identifier* of the Link Rules Build project we just created. Link Rules will be managing this project and add the rules to it. To get the id of a project, open the project in the build component, select the project and choose 'Show identifier' in the context menu, in the top right. The project needs to have a workflow, which is used to execute the rules and has to be configured in this section. Furthermore, the projects needs one or more Datasets, which can be selected in the templates. (`linkrules` in this example) -- **Access Condition IRI**: The IRI of the Access Condition we just created. Explore the `CMEM Access Conditions` graph to find the `Access Condition` rule and its respective IRI. -- **Published Rules Workflow ID**: The *Item Identifier* of the workflow in the Link Rules Build project we just created. 
-- **Output Graph Template**: A jinja template for the graph IRIs which will hold the results of a linking rule. It needs to include the placeholder `{name}`, for example `http://eccenca.com/user_rules/result_{name}`. -- **Publication Graph Template**: A jinja template, that generates the IRI of the graph, into which the output graph is imported via an `owl:imports`. This can be a simple graph IRI, for example `http://eccenca.com/user_rules/results_published`. -- **Result Download Query** (optional): A query that will be used to download the results of a linking rule. By default all resource IRIs will be listed as `resource1` and `resource2`. Whenever you want to provide other data via the *Download results* function, such as a specific identifier or a label instead of the IRI, this query can be adapted here. -- **Annotation Class IRI** (optional): Rule results can be enriched with annotations. Instances of this class can be selected in the link rules interface. Instances have both a connection to the rule and the result. The connection is defined by the *Annotation Link Property IRI*. The values of the annotation are defined by the *Annotation Value Property IRI*. This relation is materialized as `rdf:value` in the output graph upon execution. -- **Annotation Value Property IRI** (optional): Values of sub-properties of this property are actually connected as `rdf:values` to the generated links of a rule. -- **Annotation Link Property IRI** (optional): The connection between a rule and an annotation resource (i.e. instance of *Annotation Class IRI*) is defined by this property. -- **Annotation Graph IRI** (optional): The IRI of the graph, from which instances of the *Annotation Class IRI* are selected. +- **Order**: The position of the Link Rules module in the menu. +- **Template Catalog IRI**: The IRI of the template catalog we just created. As the name implies, the Template Catalog holds all templates for usage in the Link Rules module. 
Templates allow preselecting patterns for easier rule creation. (`https://ns.eccenca.com/data/linkruletemplates/` in this example) +- **Project ID**: The *Project Identifier* of the Link Rules Build project we just created. Link Rules will be managing this project and add the rules to it. To get the id of a project, open the project in the build component, select the project and choose 'Show identifier' in the context menu, in the top right. The project needs to have a workflow, which is used to execute the rules and has to be configured in this section. Furthermore, the project needs one or more Datasets, which can be selected in the templates. (`linkrules` in this example) +- **Access Condition IRI**: The IRI of the Access Condition we just created. Explore the `CMEM Access Conditions` graph to find the `Access Condition` rule and its respective IRI. +- **Published Rules Workflow ID**: The *Item Identifier* of the workflow in the Link Rules Build project we just created. +- **Output Graph Template**: A jinja template for the graph IRIs which will hold the results of a linking rule. It needs to include the placeholder `{name}`, for example `http://eccenca.com/user_rules/result_{name}`. +- **Publication Graph Template**: A jinja template that generates the IRI of the graph, into which the output graph is imported via an `owl:imports`. This can be a simple graph IRI, for example `http://eccenca.com/user_rules/results_published`. +- **Result Download Query** (optional): A query that will be used to download the results of a linking rule. By default all resource IRIs will be listed as `resource1` and `resource2`. Whenever you want to provide other data via the *Download results* function, such as a specific identifier or a label instead of the IRI, this query can be adapted here. +- **Annotation Class IRI** (optional): Rule results can be enriched with annotations. Instances of this class can be selected in the link rules interface. 
Instances have both a connection to the rule and the result. The connection is defined by the *Annotation Link Property IRI*. The values of the annotation are defined by the *Annotation Value Property IRI*. This relation is materialized as `rdf:value` in the output graph upon execution. +- **Annotation Value Property IRI** (optional): Values of sub-properties of this property are actually connected as `rdf:values` to the generated links of a rule. +- **Annotation Link Property IRI** (optional): The connection between a rule and an annotation resource (i.e. instance of *Annotation Class IRI*) is defined by this property. +- **Annotation Graph IRI** (optional): The IRI of the graph, from which instances of the *Annotation Class IRI* are selected. When the configuration is saved the *Link Rules module* should appear in the *Build* section of the menu. @@ -146,15 +146,15 @@ Templates can be managed at *Link Rules module* -> *Manage Templates*. Create a new Link Rule Template. Such a template describes an abstract link rule with pre-defined settings. -- **Label**: A mandatory label for the template. -- **Target Property**: The (default) connecting property for the derived link rules. The property can be individually set for each link rule later on. -- **Input** group: The datasets which hold the subjects (source) and objects (target) to link. -- **Source/Target Dataset**: The dataset holding the resources to link. -- **Source/Target Resource Pattern**: A filter description template for the resources to link. This is a JSON object as described in [Graph Resource Pattern](#graph-resource-pattern). When creating a Link Rule based on this template these resource patterns can be altered. So this template may remain simple, but should provide sufficient help for the end user to define a proper resource selection. -- **Output** group: The dataset which will hold the Link Rule results. -- **Output Graph**: The graph IRI where to write the Link Rule results. 
Available placeholders: `{name}` for the Link Rules name. +- **Label**: A mandatory label for the template. +- **Target Property**: The (default) connecting property for the derived link rules. The property can be individually set for each link rule later on. +- **Input** group: The datasets which hold the subjects (source) and objects (target) to link. +- **Source/Target Dataset**: The dataset holding the resources to link. +- **Source/Target Resource Pattern**: A filter description template for the resources to link. This is a JSON object as described in [Graph Resource Pattern](#graph-resource-pattern). When creating a Link Rule based on this template these resource patterns can be altered. So this template may remain simple, but should provide sufficient help for the end user to define a proper resource selection. +- **Output** group: The dataset which will hold the Link Rule results. +- **Output Graph**: The graph IRI where to write the Link Rule results. Available placeholders: `{name}` for the Link Rules name. -##### Graph Resource Pattern +#### Graph Resource Pattern This section specifies the JSON object used to provide a search filter configuration for source and target resources used for the Link Rule. @@ -193,15 +193,15 @@ In the `paths` section triple patterns are defined using `subjectVarName`, `pred The `pathFilters` section defines restrictions on the variables defined above. 
The following path filter properties can be used: -- `isNoneOfLiteral` -- `isNoneOfResource` -- `varIsAnyOneOfLiteral` -- `varIsAnyOneOfResource` -- `literalFilters` -- `GreaterThan` -- `LessThan` -- `GreaterEqualsThan` -- `LessEqualThan` -- `NotEquals` -- `Contains` -- `Regex` +- `isNoneOfLiteral` +- `isNoneOfResource` +- `varIsAnyOneOfLiteral` +- `varIsAnyOneOfResource` +- `literalFilters` +- `GreaterThan` +- `LessThan` +- `GreaterEqualsThan` +- `LessEqualThan` +- `NotEquals` +- `Contains` +- `Regex` diff --git a/docs/explore-and-author/thesauri-management/index.md b/docs/explore-and-author/thesauri-management/index.md index c20b7fb05..94128e208 100644 --- a/docs/explore-and-author/thesauri-management/index.md +++ b/docs/explore-and-author/thesauri-management/index.md @@ -149,8 +149,8 @@ You can add, for example, a second broader concept for an existing concept or a To add relations, select the concept in the navigation tree. In the detail view, click **:eccenca-item-edit: Edit** to open the edit mode. -- To add an associative relation to another concept, enter the concept name in the field **Related concept**. -- To add a further broader relation, enter the name of the broader concept in the field **Broader concepts**. +- To add an associative relation to another concept, enter the concept name in the field **Related concept**. +- To add a further broader relation, enter the name of the broader concept in the field **Broader concepts**. You can only choose from existing concepts. Click **SAVE** to confirm your changes. 
diff --git a/docs/explore-and-author/vocabulary-catalog/index.md b/docs/explore-and-author/vocabulary-catalog/index.md index 93910118f..66ba14d64 100644 --- a/docs/explore-and-author/vocabulary-catalog/index.md +++ b/docs/explore-and-author/vocabulary-catalog/index.md @@ -30,8 +30,8 @@ A vocabulary which is known and available but not installed, looks like this: Example of extended information of uninstalled Vocabulary Catalog -- Use **Install** or the switch in the column `Installed` to install the Catalog. -- Use **View** to access the Vocabulary. +- Use **Install** or the switch in the column `Installed` to install the Catalog. +- Use **View** to access the Vocabulary. A vocabulary which is installed looks like this @@ -39,6 +39,6 @@ A vocabulary which is installed looks like this Example of extended information of installed Vocabulary Catalog -- Use **Uninstall** to remove an installed vocabulary or **Install** to install a vocabulary. -- Use **View** to access the Vocabulary. -- Use **Upload** to install or overwrite the vocabulary from a file. +- Use **Uninstall** to remove an installed vocabulary or **Install** to install a vocabulary. +- Use **View** to access the Vocabulary. +- Use **Upload** to install or overwrite the vocabulary from a file. 
diff --git a/docs/getting-started/index.md b/docs/getting-started/index.md index 813a72490..6c6de8e94 100644 --- a/docs/getting-started/index.md +++ b/docs/getting-started/index.md @@ -24,21 +24,21 @@ eccenca Corporate Memory is a semantic data management software that accelerates The main features of Corporate Memory include: -- Flexible metadata and schema layer based on knowledge graphs -- Data virtualization and analytics -- Data integration and indexing -- Dataset and vocabulary management -- Thesaurus and taxonomy management -- Big data scalability -- Access control +- Flexible metadata and schema layer based on knowledge graphs +- Data virtualization and analytics +- Data integration and indexing +- Dataset and vocabulary management +- Thesaurus and taxonomy management +- Big data scalability +- Access control ### Minimal requirements For the best user experience, we recommend to use the newest version of Google Chrome or Mozilla Firefox. Corporate Memory is tested with the following browsers: -- Google Chrome 83 or later -- Mozilla Firefox 78 or later -- Microsoft Edge 83 (on Windows) or later +- Google Chrome 83 or later +- Mozilla Firefox 78 or later +- Microsoft Edge 83 (on Windows) or later ## Login and Logout @@ -71,16 +71,16 @@ To access a module, click the module name. The active module is highlighted. 
By default, Corporate Memory provides the following modules: -- EXPLORE - for Knowledge Graph browsing and exploration, specifically - - [Knowledge Graphs](../explore-and-author/index.md) - a generic and extensible RDF data browser and editor - - [Vocabularies](../explore-and-author/vocabulary-catalog/index.md) - for vocabulary management - - [Thesauri](../explore-and-author/thesauri-management/index.md) - for managing thesauri and taxonomies based on SKOS - - [Queries](../explore-and-author/query-module/index.md) - a SPARQL query interface -- [BUILD](../build/index.md) - for creating and integrating Knowledge Graphs, with specific links to - - Projects - the BUILD Projects level - - Datasets - the Datasets across all BUILD Projects - - Workflows - the Workflows across all BUILD Projects - - Activities - activities overview and monitoring +- EXPLORE - for Knowledge Graph browsing and exploration, specifically + - [Knowledge Graphs](../explore-and-author/index.md) - a generic and extensible RDF data browser and editor + - [Vocabularies](../explore-and-author/vocabulary-catalog/index.md) - for vocabulary management + - [Thesauri](../explore-and-author/thesauri-management/index.md) - for managing thesauri and taxonomies based on SKOS + - [Queries](../explore-and-author/query-module/index.md) - a SPARQL query interface +- [BUILD](../build/index.md) - for creating and integrating Knowledge Graphs, with specific links to + - Projects - the BUILD Projects level + - Datasets - the Datasets across all BUILD Projects + - Workflows - the Workflows across all BUILD Projects + - Activities - activities overview and monitoring !!! 
note diff --git a/docs/getting-started/with-your-sandbox/index.md b/docs/getting-started/with-your-sandbox/index.md index 20ea4315b..6fbe797cd 100644 --- a/docs/getting-started/with-your-sandbox/index.md +++ b/docs/getting-started/with-your-sandbox/index.md @@ -17,7 +17,7 @@ eccenca's Corporate Memory is a platform for creating and managing Enterprise Kn
-- :material-rocket-launch:{ .lg .middle } __Get Started and Get Help__ +- :material-rocket-launch:{ .lg .middle } __Get Started and Get Help__ --- @@ -29,33 +29,33 @@ eccenca's Corporate Memory is a platform for creating and managing Enterprise Kn Find and contact us at: [:simple-github:](https://github.com/eccenca){target=_blank} • [:simple-x:](https://x.com/eccenca){target=_blank} • [:simple-linkedin:](https://de.linkedin.com/company/eccenca-gmbh){target=_blank} • [:octicons-mail-24:](mailto:info@eccenca.com) -- :fontawesome-brands-dropbox:{ .lg .middle } __Sandbox Resources__ +- :fontawesome-brands-dropbox:{ .lg .middle } __Sandbox Resources__ --- The sandbox includes a sample build project named _"Product Data Integration Demo"_ and the graphs generated by that project, as well as an integration graph as an entry point: _"Products - Integration"_. Shacl shapes are provided for the product vocabulary. These are used in the Business Knowledge Editor module for visual exploration as well as in a custom workspace configuration called _Product Data Integration_ to demonstrate how the user interface can be customized. -- :fontawesome-solid-graduation-cap:{ .lg .middle } __Masterclass Material__ +- :fontawesome-solid-graduation-cap:{ .lg .middle } __Masterclass Material__ --- - A list of materials and resources to reproduce and follow the masterclass session: _From Zero to KG Hero: Boosting Your KG Creation Productivity with eccenca Corporate Memory_. Originally presented at **The Knowledge Graph Conference 2023**. [Watch the recording on :simple-youtube:](https://youtu.be/qD-hge6gyIE){target=_blank}. + A list of materials and resources to reproduce and follow the masterclass session: _From Zero to KG Hero: Boosting Your KG Creation Productivity with eccenca Corporate Memory_. Originally presented at __The Knowledge Graph Conference 2023__. [Watch the recording on :simple-youtube:](https://youtu.be/qD-hge6gyIE){target=_blank}. 
[:octicons-arrow-right-24: materials and resources](./material.md){target=_blank} -- :material-script-text-play-outline:{ .lg .middle } __Tutorials and Examples__ +- :material-script-text-play-outline:{ .lg .middle } __Tutorials and Examples__ --- Our [tutorials](../../tutorials/index.md){target=_blank} help you to create Knowledge Graphs and to use the exploration and consumption features. To get started, we recommend: - - [Lift tabular sources (CSV, XSLX, JDBC)](../../build/lift-data-from-tabular-data-such-as-csv-xslx-or-database-tables/index.md){target=_blank} - - [Active Learning of Linking Rules](../../build/active-learning/index.md){target=_blank} - - [Building a customized User Interface](../../explore-and-author/graph-exploration/building-a-customized-user-interface/index.md){target=_blank} - - [Populate Data to Neo4j](../../consume/populate-data-to-neo4j/index.md){target=_blank} - - [Data in any Format via Custom API](../../consume/provide-data-in-any-format-via-a-custom-api/index.md){target=_blank} + - [Lift tabular sources (CSV, XSLX, JDBC)](../../build/lift-data-from-tabular-data-such-as-csv-xslx-or-database-tables/index.md){target=_blank} + - [Active Learning of Linking Rules](../../build/active-learning/index.md){target=_blank} + - [Building a customized User Interface](../../explore-and-author/graph-exploration/building-a-customized-user-interface/index.md){target=_blank} + - [Populate Data to Neo4j](../../consume/populate-data-to-neo4j/index.md){target=_blank} + - [Data in any Format via Custom API](../../consume/provide-data-in-any-format-via-a-custom-api/index.md){target=_blank} -- :material-graph:{ .lg .middle } __BUILD__ +- :material-graph:{ .lg .middle } __BUILD__ --- @@ -63,7 +63,7 @@ eccenca's Corporate Memory is a platform for creating and managing Enterprise Kn [:octicons-arrow-right-24: Learn more about _Build_](../../build/index.md){target=_blank} -- :material-compass-rose:{ .lg .middle } __EXPLORE__ +- :material-compass-rose:{ .lg 
.middle } __EXPLORE__ --- @@ -71,7 +71,7 @@ eccenca's Corporate Memory is a platform for creating and managing Enterprise Kn [:octicons-arrow-right-24: Learn more about _Explore_](../../explore-and-author/index.md){target=_blank} -- :material-api:{ .lg .middle } __CONSUME__ +- :material-api:{ .lg .middle } __CONSUME__ --- @@ -79,7 +79,7 @@ eccenca's Corporate Memory is a platform for creating and managing Enterprise Kn [:octicons-arrow-right-24: Learn more about _Consume_](../../consume/index.md){target=_blank} -- :other-powerautomate:{ .lg .middle } __AUTOMATE__ +- :other-powerautomate:{ .lg .middle } __AUTOMATE__ --- @@ -87,7 +87,7 @@ eccenca's Corporate Memory is a platform for creating and managing Enterprise Kn [:octicons-arrow-right-24: Learn more about _Automate_](../../automate/index.md){target=_blank} -- :material-account-school:{ .lg .middle } __Training and Certification__ +- :material-account-school:{ .lg .middle } __Training and Certification__ --- @@ -95,7 +95,7 @@ eccenca's Corporate Memory is a platform for creating and managing Enterprise Kn [:octicons-arrow-right-24: register at _eccenca LMS_](https://lms.eccenca.com/){target=_blank} -- :material-information-variant:{ .lg .middle } __About Corporate Memory__ +- :material-information-variant:{ .lg .middle } __About Corporate Memory__ --- diff --git a/docs/getting-started/with-your-sandbox/material.md b/docs/getting-started/with-your-sandbox/material.md index 8f63fbe90..144aba028 100644 --- a/docs/getting-started/with-your-sandbox/material.md +++ b/docs/getting-started/with-your-sandbox/material.md @@ -15,7 +15,7 @@ A list of materials and resources to reproduce and follow the masterclass (MC).
-- ## File resources +- ## File resources --- @@ -29,7 +29,7 @@ A list of materials and resources to reproduce and follow the masterclass (MC). *) vocabulary already installed, attached for information purposes only. -- ## Name(space) suggestions +- ## Name(space) suggestions --- @@ -43,7 +43,7 @@ A list of materials and resources to reproduce and follow the masterclass (MC). | Dataset (KG) | MC Prod - Links | `http://mc.eccenca.com/prod-links/` | | Build Project | MC Product Build Demo | | -- ## Resource IRI suggestions +- ## Resource IRI suggestions --- @@ -53,7 +53,7 @@ A list of materials and resources to reproduce and follow the masterclass (MC). | Employee | `http://mc.eccenca.com/prod-data/empl-{email}` | | Hardware | `http://mc.eccenca.com/prod-data/hw-{id}` | | Price | `http://mc.eccenca.com/prod-data/price-{parent-id}-{currency}` | - | Product Category | `http://mc.eccenca.com/prod-data/prod-cat-{name|uuid}` | + | Product Category | `http://mc.eccenca.com/prod-data/prod-cat-{uuid(name)}` | | Service | `http://mc.eccenca.com/prod-data/srv-{id}` | | Supplier | `http://mc.eccenca.com/prod-data/suppl-{id}` | diff --git a/docs/index.md b/docs/index.md index b3307ba27..66b037bac 100644 --- a/docs/index.md +++ b/docs/index.md @@ -8,59 +8,58 @@ hide:
-- :material-calendar: [Release Notes](release-notes/corporate-memory-24-3/index.md) +- :material-calendar: [Release Notes](release-notes/corporate-memory-24-3/index.md) --- Documentation of changes and enhancements for each version. -- :material-list-status: [Tutorials](tutorials/index.md) +- :material-list-status: [Tutorials](tutorials/index.md) --- Learn by example and step-by-step guidelines to achieve a concrete goal fast. -- :material-arrow-right-thick: [Getting Started](getting-started/index.md) +- :material-arrow-right-thick: [Getting Started](getting-started/index.md) --- This page describes how to work with Corporate Memory and shortly outlines all functionalities of the user interface. -- :material-star: [Build](build/index.md) +- :material-star: [Build](build/index.md) --- Lift your data by integrating multiple datasets into a Knowledge Graph. -- :material-star: [Explore and Author](explore-and-author/index.md) +- :material-star: [Explore and Author](explore-and-author/index.md) --- Explore, author and interact with your Knowledge Graph. -- :material-star: [Consume](consume/index.md) +- :material-star: [Consume](consume/index.md) --- This section outlines how to consume data from the Knowledge Graph. -- :material-star-outline: [Deploy and Configure](deploy-and-configure/index.md) +- :material-star-outline: [Deploy and Configure](deploy-and-configure/index.md) --- Deploy in your own environment. -- :material-star-outline: [Automate](automate/index.md) +- :material-star-outline: [Automate](automate/index.md) --- Setup processes and automate activities based on and towards your Knowledge Graph. -- :material-star-outline: [Develop](develop/index.md) +- :material-star-outline: [Develop](develop/index.md) --- API documentation and programming recipes.
- diff --git a/docs/release-notes/corporate-memory-19-10/index.md b/docs/release-notes/corporate-memory-19-10/index.md index 6e0935b74..345be7bc9 100644 --- a/docs/release-notes/corporate-memory-19-10/index.md +++ b/docs/release-notes/corporate-memory-19-10/index.md @@ -80,56 +80,55 @@ In addition to that, these changes are shipped: In addition to that, multiple performance and stability issues were solved. - ## eccenca DataManager v19.10 This version of eccenca DataManager adds the following new features: -- New module `task` - - Offers a direct resource actions. Interfaces only available by URL. See documentation for more details. - - Path `/task/resource/create` allows to create a new resource by given graph and type. -- General - - Config parameter `js.config.api.defaultTimeout` for default UI queries timeout. - - Config parameter `js.config.resourceTable.timeoutDownload` for Resource Table timeout on download requests on Explore and Query modules. - - Validation of mandatory fields in `shacline` view. - - Add new property `shui:onUpdateUpdate` for `sh:NodeShape`. -- Module Explore - - Config parameter `js.config.modules.explore.graphlist.whiteList` to filter specific graphs. - - Config parameter `js.config.modules.explore.graphlist.internalGraphs` to hide specific graphs. - - Config parameter `js.config.modules.explore.navigation.itemsPerPage` show items per page in navigation box. - - Support for inverse property relations. -- Module Query - - Config parameter `js.config.modules.query.timeout` for manual queries. - - Config parameter `js.config.modules.query.graph` to define the graph were data is saved and requested. +- New module `task` + - Offers direct resource actions. Interfaces only available by URL. See documentation for more details. + - Path `/task/resource/create` allows to create a new resource by given graph and type. +- General + - Config parameter `js.config.api.defaultTimeout` for default UI queries timeout. 
+ - Config parameter `js.config.resourceTable.timeoutDownload` for Resource Table timeout on download requests on Explore and Query modules. + - Validation of mandatory fields in `shacline` view. + - Add new property `shui:onUpdateUpdate` for `sh:NodeShape`. +- Module Explore + - Config parameter `js.config.modules.explore.graphlist.whiteList` to filter specific graphs. + - Config parameter `js.config.modules.explore.graphlist.internalGraphs` to hide specific graphs. + - Config parameter `js.config.modules.explore.navigation.itemsPerPage` show items per page in navigation box. + - Support for inverse property relations. +- Module Query + - Config parameter `js.config.modules.query.timeout` for manual queries. + - Config parameter `js.config.modules.query.graph` to define the graph where data is saved and requested. In addition to that, these changes are shipped: -- General - - Default pagination size of 20 elements for all Resource Tables. - - Allow datatype `xsd:anyURI` for literals. - - Upgraded to react 16. -- Module Explore - - Merged graph view `RDFDoc` into 'resource details view'. 
+ - Renamed global search label. + - Graph creation will add the type `void:Dataset` instead of `owl:Ontology`. + - Use the label of the type of the instances for the name of the CSV file downloaded from the Resource Table. + - Display the context graph in `properties` and `references` tables. +- Module Dataset + - Adjusted position and tooltip of parameter `uriProperty` in 'Add data stepper'. +- Module Query + - Use the dataset label for the name of the CSV file downloaded from the Resource Table. +- Module Login + - Renew tokens when they expire. +- Module Administration + - Allow to search in IRIs for list of readable and writeable graphs. The following features have been removed in this release: -- Module Explore - - Config parameter `js.config.modules.explore.graphlist.listQuery` which is now obsolete. - - Config parameter `js.config.modules.explore.details.history` which is now obsolete as the feature is no longer supported. - - 'History' tab. -- Module Sync also known as `SubscriptionManagement`. +- Module Explore + - Config parameter `js.config.modules.explore.graphlist.listQuery` which is now obsolete. + - Config parameter `js.config.modules.explore.details.history` which is now obsolete as the feature is no longer supported. + - 'History' tab. +- Module Sync also known as `SubscriptionManagement`. In addition to that, multiple stability issues were solved. @@ -137,34 +136,34 @@ In addition to that, multiple stability issues were solved. This version of eccenca DataPlatform adds the following new features: -- SPARQL 1.1 Query endpoint - - An `in-iris` property to the JSON `search` parameter to enable search over IRIs. - - A `timeout` parameter which allows to configure the maximal amount of milliseconds that a query execution can run. - - Support for Microsoft Excel (`.xlsx`) file download for `SELECT` queries. 
-- SPARQL 1.1 Update endpoint - - A `timeout` parameter which allows to configure the maximal amount of milliseconds that an update execution can run (Stardog only). -- SPARQL 1.1 Graph Store Protocol - - `multipart/form-data` support for HTTP PUT. - - Added the `timeout` parameter, which allows to configure the maximal amount of milliseconds that a request execution should run. - - Documentation for content negotiation by `format` query parameter. +- SPARQL 1.1 Query endpoint + - An `in-iris` property to the JSON `search` parameter to enable search over IRIs. + - A `timeout` parameter which allows to configure the maximal amount of milliseconds that a query execution can run. + - Support for Microsoft Excel (`.xlsx`) file download for `SELECT` queries. +- SPARQL 1.1 Update endpoint + - A `timeout` parameter which allows to configure the maximal amount of milliseconds that an update execution can run (Stardog only). +- SPARQL 1.1 Graph Store Protocol + - `multipart/form-data` support for HTTP PUT. + - Added the `timeout` parameter, which allows to configure the maximal amount of milliseconds that a request execution should run. + - Documentation for content negotiation by `format` query parameter. The following features have been removed in this release: -- Data Sharing: A WebSub based Publish-Subscribe service for RDF named graphs. -- IoT Permissions Plugin: A plugin which enables the usage of the IoT Permissions Service API 2. -- OAuth 2.0 authorization server: Issues access tokens to a client after successfully authenticating a user. -- Authentication: User management via authentication providers as it was only needed by the OAuth 2.0 authorization server. +- Data Sharing: A WebSub based Publish-Subscribe service for RDF named graphs. +- IoT Permissions Plugin: A plugin which enables the usage of the IoT Permissions Service API 2. +- OAuth 2.0 authorization server: Issues access tokens to a client after successfully authenticating a user. 
+- Authentication: User management via authentication providers as it was only needed by the OAuth 2.0 authorization server. In addition to that, these changes are shipped: -- Stardog - - Upgraded support to version 7.0.2. - - Versioning does no longer work with Stardog 7. - - Legacy versioning support for Stardog 6 (deprecated). -- OAuth 2.0: Resource protection is now mandatory (can no longer be disabled, use anonymous access instead). -- SPARQL 1.1 Query endpoint - - The value of the `string` property of the JSON `search` parameter is now tokenized which means that each token will be searched separately. Only results matching all tokens will be returned. - - Updated Spring Boot version from 1.5.21 to 1.5.22. +- Stardog + - Upgraded support to version 7.0.2. + - Versioning does no longer work with Stardog 7. + - Legacy versioning support for Stardog 6 (deprecated). +- OAuth 2.0: Resource protection is now mandatory (can no longer be disabled, use anonymous access instead). +- SPARQL 1.1 Query endpoint + - The value of the `string` property of the JSON `search` parameter is now tokenized which means that each token will be searched separately. Only results matching all tokens will be returned. + - Updated Spring Boot version from 1.5.21 to 1.5.22. In addition to that, multiple performance and stability issues were solved. @@ -172,16 +171,16 @@ In addition to that, multiple performance and stability issues were solved. With the removal of the OAuth 2.0 authorization server capability, many configuration properties have been changed. -- Removed - - The properties `oauth2.clients.*` have been removed. - - The properties `authentication.*` have been removed. -- Moved - - The property `oauth2.jwt.signing.verificationKey` has been moved to `security.oauth2.resource.jwt.keyValue` . - - The property `oauth2.anonymous` has been moved to `security.oauth2.resource.anonymous` . 
- - The claims mapping properties under `oauth2.resourceServer.claimsMapping.*` have been moved to `security.oauth2.resource.jwt.claims.*` . - - The properties `oauth2.authorizeRequests.*` to configure the resources to be protected by the resource server have been moved to `security.oauth2.resource.authorizeRequests.*` . -- Added - - The value of the property `security.oauth2.resource.id`  (defaults to `dataplatform`) must be part of the `aud` (audience) claim in the JWT used to access a protected resource. +- Removed + - The properties `oauth2.clients.*` have been removed. + - The properties `authentication.*` have been removed. +- Moved + - The property `oauth2.jwt.signing.verificationKey` has been moved to `security.oauth2.resource.jwt.keyValue` . + - The property `oauth2.anonymous` has been moved to `security.oauth2.resource.anonymous` . + - The claims mapping properties under `oauth2.resourceServer.claimsMapping.*` have been moved to `security.oauth2.resource.jwt.claims.*` . + - The properties `oauth2.authorizeRequests.*` to configure the resources to be protected by the resource server have been moved to `security.oauth2.resource.authorizeRequests.*` . +- Added + - The value of the property `security.oauth2.resource.id`  (defaults to `dataplatform`) must be part of the `aud` (audience) claim in the JWT used to access a protected resource. Don't forget to update your configuration accordingly. 
For instance, assuming you have the following old configuration: @@ -211,7 +210,6 @@ oauth2: key: 'groups' ``` - The migrated properties should look like this: ``` yaml @@ -229,4 +227,3 @@ security: groups: groups # optional, defaults to `groups` clientId: azp # optional, defaults to `azp` ``` - diff --git a/docs/release-notes/corporate-memory-20-03/index.md b/docs/release-notes/corporate-memory-20-03/index.md index 28b189c81..2e56e9a49 100644 --- a/docs/release-notes/corporate-memory-20-03/index.md +++ b/docs/release-notes/corporate-memory-20-03/index.md @@ -30,7 +30,6 @@ More detailed release notes for these versions are listed below. ## eccenca DataIntegration v20.03 - This version of eccenca DataIntegration adds the following new features: - Support for additional value types for mapping targets (XML Schema date/time types, duration, etc.). @@ -67,7 +66,6 @@ In addition to that, these changes are shipped: In addition to that, multiple performance and stability issues were solved. - ## eccenca DataManager v20.03 This version of eccenca DataManager adds the following new features: @@ -104,7 +102,6 @@ The following features have been removed in this release: In addition to that, multiple stability issues were solved. - ## eccenca DataPlatform v20.03 This version of eccenca DataPlatform adds the following new features: @@ -154,7 +151,6 @@ This version of eccenca Corporate Memory Control (cmemc) adds the following new - `workspace` command group, to `import` and `export` the workspace - ability to work with SSL enabled deployments (add CA certs) - ## Migration Notes ### DataIntegration @@ -166,7 +162,7 @@ With v20.03 the following changes need to be made in your dataintegration.conf f - both take an arbitrary alpha numerical string of minimum 16 characters length - depending on your deployment set them in your `production.conf` or `application.conf` DataIntegration configuration file -``` +```conf ... 
play.http.secret.key = "uiodshfoun78qwg8asd7gfasdasddfgn87gsn8fdsngasdfsngf8ds" ... @@ -222,4 +218,3 @@ spring: jwt: jwk-set-uri: http://keycloak:8080/auth/realms/cmem/protocol/openid-connect/certs ``` - diff --git a/docs/release-notes/corporate-memory-20-06/index.md b/docs/release-notes/corporate-memory-20-06/index.md index e3b273db5..a6d70d577 100644 --- a/docs/release-notes/corporate-memory-20-06/index.md +++ b/docs/release-notes/corporate-memory-20-06/index.md @@ -194,4 +194,3 @@ With v20.06 the following changed need to be made: - `cmemc query execute --accept '*'` - `query list` has a different default output, to return to the previous behavior change your cmemc `query list` calls to: - `cmemc query list --id-only` - diff --git a/docs/release-notes/corporate-memory-20-10/index.md b/docs/release-notes/corporate-memory-20-10/index.md index b0c0ca7c4..579256c88 100644 --- a/docs/release-notes/corporate-memory-20-10/index.md +++ b/docs/release-notes/corporate-memory-20-10/index.md @@ -29,7 +29,6 @@ This release delivers the following component versions: More detailed release notes for these versions are listed below. - ## eccenca DataIntegration v20.10.1 This version of eccenca DataIntegration adds the following new features: @@ -102,7 +101,6 @@ Finally, the following performance and stability issues were solved: - Deleting S3 backed resources broken due to a slash added to filenames - Update PostgreSQL driver to v42.2.14 because of security vulnerability - ## eccenca DataManager v20.10.1 This version of eccenca DataManager adds the following new features: @@ -148,7 +146,6 @@ In addition to that, these changes are shipped: In addition to that, multiple performance and stability issues were solved. - ## eccenca DataPlatform v20.10 This version of eccenca DataPlatform adds the following new features: @@ -173,7 +170,6 @@ This version of eccenca DataPlatform adds the following new features: In addition to that, multiple performance and stability issues were solved. 
- ## eccenca Corporate Memory Control (cmemc) v20.10 This version of cmemc adds the following new features: @@ -196,19 +192,18 @@ In addition to that, these changes are shipped: - The completion of `--filename-template` resulted in files with wrong chars. - The python version is disabled in completion mode. - ## eccenca Corporate Memory PowerBI Connector (v20.10) This release of our PowerBI Connector does not introduce new features or relevant changes. We provided a tutorial on how to use this component: [Consuming Graphs in Power BI](../../consume/consuming-graphs-in-power-bi/index.md) - # Migration Notes ## DataIntegration - XML serialization for meta data elements is not forward compatible, i.e. projects exported with this version cannot be imported in older DataIntegration versions. - The logout URL needs to be set to make sure that DataIntegration also triggers a logout inside the Keycloak instance: - ``` + + ```conf oauth.logoutRedirectUrl = ${DEPLOY_BASE_URL}"/auth/realms/cmem/protocol/openid-connect/logout?redirect_uri="${DEPLOY_BASE_URL} ``` @@ -219,6 +214,7 @@ This release of our PowerBI Connector does not introduce new features or relevan - The modules `build` as well as `datasets` are disabled now by default. - The module `explore` is the default first entry point (`startsWith`). - This section needs to be added to each workspace configuration: + ``` yaml DIWorkspace: enable: true @@ -228,4 +224,3 @@ This release of our PowerBI Connector does not introduce new features or relevan ## cmemc - If your automation scripts rely on the created file name of the project export command, you need to change your scripts and set the old export name explicitly with `-t {{id}}`. 
- diff --git a/docs/release-notes/corporate-memory-20-12/index.md b/docs/release-notes/corporate-memory-20-12/index.md index 52b48fd4e..e46935de7 100644 --- a/docs/release-notes/corporate-memory-20-12/index.md +++ b/docs/release-notes/corporate-memory-20-12/index.md @@ -152,10 +152,11 @@ We provided a tutorial on how to use this component: [Consuming Graphs in Power ### DataManager - In your workspaces configuration add `DIWorkspace.baseUrl` (mostly this will be `"/dataintegration"`): + ``` yaml js.config.workspaces: default: - ... + ... DIWorkspace: ... baseUrl: /dataintegration @@ -164,4 +165,3 @@ js.config.workspaces: ### cmemc - The `config check` command has been deprecated, please use the `admin status` command instead. - diff --git a/docs/release-notes/corporate-memory-21-02/index.md b/docs/release-notes/corporate-memory-21-02/index.md index d7eeaf025..23f2f19a6 100644 --- a/docs/release-notes/corporate-memory-21-02/index.md +++ b/docs/release-notes/corporate-memory-21-02/index.md @@ -200,4 +200,3 @@ In addition to that, these changes are shipped: - `workflow list` command, use the `--id-only` or `CMEMC_WORKFLOW_LIST_ID_ONLY=true` to get the ID list. - `graph list` command, use `--filter access readonly`|`writeable` instead of `--filter` `readonly`|`writeable` - The command config check was removed (was deprecated in v20.12) - diff --git a/docs/release-notes/corporate-memory-21-04/index.md b/docs/release-notes/corporate-memory-21-04/index.md index 97e4f73d5..e6da032eb 100644 --- a/docs/release-notes/corporate-memory-21-04/index.md +++ b/docs/release-notes/corporate-memory-21-04/index.md @@ -202,4 +202,3 @@ No migration notes - The exit code values of `project import` and `export` commands are fixed (in case of failure) so you may have to change these calls in your scripts. - The deprecated `workspace` command group is now only available as `admin workspace` command group so you have to change these calls in scripts. 
- diff --git a/docs/release-notes/corporate-memory-21-06/index.md b/docs/release-notes/corporate-memory-21-06/index.md index 302df0235..aab44042f 100644 --- a/docs/release-notes/corporate-memory-21-06/index.md +++ b/docs/release-notes/corporate-memory-21-06/index.md @@ -6,7 +6,6 @@ tags: Corporate Memory 21.06 is the third release in 2021. - ![21.06 Workflow Editor](21.06-WorkflowEditor.png "21.06 Workflow Editor") ![21.06 Vocabulary Viewer](21.06-VocabularyViewer.png "21.06 Vocabulary Viewer") @@ -160,7 +159,8 @@ In addition to that, these changes are shipped: - This does not influence the actual execution of the workflows in any way. - An auto-layouting feature will be added in the future - Plugin configuration has been changed. The 'plugin.blacklist' has been deprecated and will be removed in future versions. See example below for new format: -``` + +```conf pluginRegistry { # External plugins are loaded from this folder pluginFolder = ${elds.home}"/etc/dataintegration/plugins/" @@ -184,5 +184,3 @@ No migration notes ### cmemc - The configuration keys `*_ENTRY` are not supported anymore. In case you used them, switch to `*_PROCESS` configuration - - diff --git a/docs/release-notes/corporate-memory-21-11/index.md b/docs/release-notes/corporate-memory-21-11/index.md index 583aa1b44..9e314fd42 100644 --- a/docs/release-notes/corporate-memory-21-11/index.md +++ b/docs/release-notes/corporate-memory-21-11/index.md @@ -117,7 +117,7 @@ This version of eccenca DataManager adds the following new features: - Vocabs Module - Allow create new empty ontology without uploading a file. - Check if graph exist and show an error while creating a new vocab. 
-- Explore +- Explore - Allow hide / show the vocab viz module via configuration `details.visualization.enable` - Center automatically load vocab viz on load - Show precise tooltips for controls of vocab viz @@ -162,6 +162,7 @@ This version of eccenca DataPlatform ships the following new features: - Prometheus and Spring metrics endpoints are now exposed per default, i.e. `./actuator/prometheus` or `actuator/metrics` for list and, exemplarily, `./actuator/metrics/cache.size` for the metric of interest, see the [spring doc](https://docs.spring.io/spring-boot/docs/current/reference/html/actuator.html#actuator.metrics.endpoint) for more information. - you can deactivate them using the configuration properties in `application.yml` (or any other spring config) + ``` yaml title="application.yml" endpoint: prometheus: @@ -169,7 +170,9 @@ endpoint: metrics: enabled: false ``` + - Users roles need to match values of `authorization.abox.adminGroup` or `authorization.abox.metricsGroup` role definition for accessing those endpoints. `authorization.abox.metricsGroup` defaults to `metrics`, therefore in keycloak a user needs to `metrics` added as role, for example via a group and groupmapping. + - graphdb lucene index support - the index is used for example in the explore section to allow fast and userfriendly access - Graph List @@ -287,6 +290,7 @@ No migration notes - Jinja templates will no longer fail on unknown tokens. If this was used for signaling errors or fail-fast evaluation, this has to be implemented in regular conditional checks. - Virtuoso config requires adjustments, its HTTP port needs to be configured. 
- Please ensure, that the configured user has the same access rights in virtuoso via ODBC and HTTP + ``` yaml title="application.yml (old)" sparqlEndpoints: virtuoso: @@ -297,7 +301,9 @@ sparqlEndpoints: username: "dba" password: "dba" ``` + becomes + ``` yaml title="application.yml (new)" sparqlEndpoints: virtuoso: diff --git a/docs/release-notes/corporate-memory-22-1/index.md b/docs/release-notes/corporate-memory-22-1/index.md index f4862eb59..003a10ae6 100644 --- a/docs/release-notes/corporate-memory-22-1/index.md +++ b/docs/release-notes/corporate-memory-22-1/index.md @@ -30,10 +30,10 @@ The highlights of this release are: This release delivers the following component versions: -* eccenca DataPlatform v22.1 -* eccenca DataIntegration v22.1 -* eccenca DataManager v22.1.1 -* eccenca Corporate Memory Control (cmemc) v22.1.1 +- eccenca DataPlatform v22.1 +- eccenca DataIntegration v22.1 +- eccenca DataManager v22.1.1 +- eccenca Corporate Memory Control (cmemc) v22.1.1 More detailed release notes for these versions are listed below. @@ -95,7 +95,6 @@ In addition to that, these changes are shipped: In addition to that, multiple performance and stability issues were solved. - ## eccenca DataManager v22.1.1 This version of eccenca DataManager adds the following new features: @@ -198,7 +197,6 @@ The following commands are deprecated: In addition to that, multiple performance and stability issues were solved. - ## Migration Notes ### DataIntegration @@ -233,4 +231,3 @@ In addition to that, multiple performance and stability issues were solved. 
- deprecated commands - `admin bootstrap`|`showcase` are deprecated - use `admin store bootstrap`|`showcase` instead - diff --git a/docs/release-notes/corporate-memory-22-2/index.md b/docs/release-notes/corporate-memory-22-2/index.md index cadafdd13..044580b40 100644 --- a/docs/release-notes/corporate-memory-22-2/index.md +++ b/docs/release-notes/corporate-memory-22-2/index.md @@ -12,13 +12,13 @@ Corporate Memory 22.2.3 is the third patch release in the 22.2 release line. The highlights of this release are: -- Build: - - The all new **Active** (Link) **Learning UI** - - Extended **Python Plugin SDK** -- Explore: - - New graph exploration module **EasyNav** -- Automate: - - Tag filter, better status monitoring and complete query management +- Build: + - The all new **Active** (Link) **Learning UI** + - Extended **Python Plugin SDK** +- Explore: + - New graph exploration module **EasyNav** +- Automate: + - Tag filter, better status monitoring and complete query management !!! warning @@ -30,10 +30,10 @@ The highlights of this release are: This release delivers the following component versions: -- eccenca DataPlatform v22.2.2 -- eccenca DataIntegration v22.2.1 -- eccenca DataManager v22.2.3 -- eccenca Corporate Memory Control (cmemc) v22.2 +- eccenca DataPlatform v22.2.2 +- eccenca DataIntegration v22.2.1 +- eccenca DataManager v22.2.3 +- eccenca Corporate Memory Control (cmemc) v22.2 More detailed release notes for these versions are listed below. @@ -41,65 +41,65 @@ More detailed release notes for these versions are listed below. v22.2.1 of eccenca DataIntegration adds the following new features: -- Rule and workflow editors: - - Support automatic scrolling when moving beyond the editor canvas borders on a all drag and edge connect/update operations. -- Added "sort words" transform operator, which sorts words in each value. 
+- Rule and workflow editors:
+    - Support automatic scrolling when moving beyond the editor canvas borders on all drag and edge connect/update operations.
+- Added "sort words" transform operator, which sorts words in each value.

 In addition to that, these changes are included in v22.2.1 of eccenca DataIntegration:

-- Rule editors (linking, transform):
-  - On tab change do not remove the search text, instead select the text to easily overwrite it.
-  - Allow to search for input paths in the `All` tab.
-- If a long-running workflow is executed manually, the same workflow can be started by a scheduler in the background.
-- Executing workflows did not occupy a slot in the thread pool (i.e., unlimited workflows could be executed concurrently).
-- Generating links could lead to a deadlock, if no slot in the thread pool is available.
-- Entering an invalid URI as path input in the linking editor with a knowledge graph as input results in the rule being broken in the editor.
-- Linking editor: Show the same property labels in the input path auto-completion as in the tab auto-completion.
+- Rule editors (linking, transform):
+    - On tab change do not remove the search text, instead select the text to easily overwrite it.
+    - Allow to search for input paths in the `All` tab.
+- If a long-running workflow is executed manually, the same workflow can be started by a scheduler in the background.
+- Executing workflows did not occupy a slot in the thread pool (i.e., unlimited workflows could be executed concurrently).
+- Generating links could lead to a deadlock, if no slot in the thread pool is available.
+- Entering an invalid URI as path input in the linking editor with a knowledge graph as input results in the rule being broken in the editor.
+- Linking editor: Show the same property labels in the input path auto-completion as in the tab auto-completion.
v22.2 of eccenca DataIntegration adds the following new features: -- New active learning UI -- Python plugins: Added context objects that allow accessing context dependent functionalities, such as: - - The current OAuth token - - Updating the execution report (for workflows) - - DI version - - Current project and task identifiers - - Requires `cmem-plugin-base >=2.0.0` -- Workflows search link in main navigation -- Linking rule editor - - Advanced parameter toggle that shows/hides advanced parameters like `weight` and advanced section in rule parameter modal -- Support for sticky notes in both linking and workflow editors -- Parameter `profiling.defaults.noEntities` to configure the default entity limit for profiling operations -- Parameter `org.silkframework.runtime.activity.concurrentExecutions` to set the max. concurrent activity instances -- Support for the `URI attribute` parameter of datasets -- Support for auto-configuration in create/update dialog -- Config parameters: - - `profiling.defaults.noEntities` to configure the default entity limit for profiling operations - - `org.silkframework.runtime.activity.concurrentExecutions` to set the max. 
concurrent activity instances - - `cors.enabled`, `cors.config.allowOrigins` and `cors.config.allowCredentials` to configure CORS settings +- New active learning UI +- Python plugins: Added context objects that allow accessing context dependent functionalities, such as: + - The current OAuth token + - Updating the execution report (for workflows) + - DI version + - Current project and task identifiers + - Requires `cmem-plugin-base >=2.0.0` +- Workflows search link in main navigation +- Linking rule editor + - Advanced parameter toggle that shows/hides advanced parameters like `weight` and advanced section in rule parameter modal +- Support for sticky notes in both linking and workflow editors +- Parameter `profiling.defaults.noEntities` to configure the default entity limit for profiling operations +- Parameter `org.silkframework.runtime.activity.concurrentExecutions` to set the max. concurrent activity instances +- Support for the `URI attribute` parameter of datasets +- Support for auto-configuration in create/update dialog +- Config parameters: + - `profiling.defaults.noEntities` to configure the default entity limit for profiling operations + - `org.silkframework.runtime.activity.concurrentExecutions` to set the max. 
concurrent activity instances + - `cors.enabled`, `cors.config.allowOrigins` and `cors.config.allowCredentials` to configure CORS settings In addition to that, these changes are included in v22.2: -- Move `outputTemplate` parameter to advanced section of XML dataset plugin -- Improved performance of conversions to floating point numbers -- Improved linking performance -- Show report on linking execution tab -- When the evaluation fails because of missing paths in the cache give specific error message with node highlighting instead of generic error notification -- Errors in invalid Python packages are recorded and returned, instead of failing -- Size of the activity thread pool can be configured -- Linking rule editor - - Show linking rule label above toolbar when in integrated mode - - Handle "reversible" comparators, e.g. "Greater than", by allowing to switch source/target inputs instead of setting the 'reverse' parameter -- DataPlatform API timeout is configurable now -- Workflow progress information was moved to node footer that is displayed empty when no information is available -- Docker image base: `debian:bullseye-20220912-slim` -- Return 503 error before exceeding the concurrent activity execution limit instead of discarding a running activity instance -- Do not execute empty object mapping rules to improve performance -- Remove root (start) page: - - Redirect to workbench project search page - - Remove legacy workspace link from user menu - - Add "load example project" action to user menu -- Show activity labels instead of IDs in task activity overview +- Move `outputTemplate` parameter to advanced section of XML dataset plugin +- Improved performance of conversions to floating point numbers +- Improved linking performance +- Show report on linking execution tab +- When the evaluation fails because of missing paths in the cache give specific error message with node highlighting instead of generic error notification +- Errors in invalid Python packages are 
recorded and returned, instead of failing +- Size of the activity thread pool can be configured +- Linking rule editor + - Show linking rule label above toolbar when in integrated mode + - Handle "reversible" comparators, e.g. "Greater than", by allowing to switch source/target inputs instead of setting the 'reverse' parameter +- DataPlatform API timeout is configurable now +- Workflow progress information was moved to node footer that is displayed empty when no information is available +- Docker image base: `debian:bullseye-20220912-slim` +- Return 503 error before exceeding the concurrent activity execution limit instead of discarding a running activity instance +- Do not execute empty object mapping rules to improve performance +- Remove root (start) page: + - Redirect to workbench project search page + - Remove legacy workspace link from user menu + - Add "load example project" action to user menu +- Show activity labels instead of IDs in task activity overview In addition to that, multiple performance and stability issues are addressed. @@ -107,46 +107,46 @@ In addition to that, multiple performance and stability issues are addressed. 
v22.2.3 of eccenca DataManager has the following fixes: -- LinkRules +- LinkRules - Rule Setup: Fix display of filter v22.2.2 of eccenca DataManager has the following fixes: -- General +- General - Logout in DM also triggers logout in DI -- LinkRules +- LinkRules - Rule Setup: Rule filter correctly displays OneOf and NoneOf - Rule is correctly serialized after editing, preventing the rule contents to be deleted v22.2.1 of eccenca DataManager has the following fixes: -- LinkRules - - Fixed trigger of refetching data after an update - - Display of negative Reference Links +- LinkRules + - Fixed trigger of refetching data after an update + - Display of negative Reference Links v22.2 of eccenca DataManager adds the following new features: -- Navigation - - Add DataIntegration workflows link to main navigation -- Vocabulary Catalog - - Inline vocabulary metadata via (editable) shape - - Ability to activate git synchronization of changes - - Change history with diff view and ability to revert to a specific commit -- Explore - - New (Shacl) Template based graph creation wizard - - Supporting different methods to define / select graph IRIs - - Support for bulk add via `.zip` archives containing multiple RDF files -- i18n - - French translation -- EasyNav - - New graph visualization module - - With search filter configuration - - Bulk node search and bulk add - - Ability to save, load and share explorations +- Navigation + - Add DataIntegration workflows link to main navigation +- Vocabulary Catalog + - Inline vocabulary metadata via (editable) shape + - Ability to activate git synchronization of changes + - Change history with diff view and ability to revert to a specific commit +- Explore + - New (Shacl) Template based graph creation wizard + - Supporting different methods to define / select graph IRIs + - Support for bulk add via `.zip` archives containing multiple RDF files +- i18n + - French translation +- EasyNav + - New graph visualization module + - With search 
filter configuration + - Bulk node search and bulk add + - Ability to save, load and share explorations In addition to that, these changes are included in v22.2 of eccenca DataManager: -- Increase height of Turtle editor in the resource details view. +- Increase height of Turtle editor in the resource details view. In addition to that, multiple performance and stability issues are addressed. @@ -154,87 +154,87 @@ In addition to that, multiple performance and stability issues are addressed. v22.2.2 of eccenca DataPlatform has the following changes: -- Fixed +- Fixed - reintroduced support for IRI templates in node shapes, with only the uuid placeholder. - Prevent buffer overflow for large query results streaming to client -- Changed +- Changed - Maintenance: Updated Spring Boot to 2.7.8 v22.2.1 of eccenca DataPlatform has the following fixes: -- Update of dependencies because of vulnerabilities i.e. Spring Boot. -- Addition of logstash runtime dependency as to enable json logging. -- GraphDb indices are created without facet option causing problems. -- Fix of memory leak in query monitor causing high heap usage. -- Refactoring of spring integration tests (IT) and inclusion of most tests in the cucumber subproject. +- Update of dependencies because of vulnerabilities i.e. Spring Boot. +- Addition of logstash runtime dependency as to enable json logging. +- GraphDb indices are created without facet option causing problems. +- Fix of memory leak in query monitor causing high heap usage. +- Refactoring of spring integration tests (IT) and inclusion of most tests in the cucumber subproject. 
v22.2 of eccenca DataPlatform ships the following new features: -- Added support for manual query/update cancellation: - - active for graphdb, stardog, neptune - - DELETE `/api/admin/currentQueries/{queryId}` - - Neptune updates cannot be cancelled because queryId header not processed -- Added support for creation of configured graphdb repository on DP startup - - `store.graphdb.createRepositoryOnStartup`: Flag if repository shall be created on startup (default: false) -- Added support for selective invalidation of caches (graph list, shapes) via Update parsing / GraphDb Change Tracking - - `proxy.cache-selective-invalidation`: true if activated, false otherwise full flush on every write (default: true) - - `store.graphdb.graphDbChangeTrackingActive`: Whether change tracking for updates is active - better results for cache invalidation (default: true) - - `store.graphdb.graphDbChangeTrackingMaxQuadMemory`: Amount of quads as a result of an update which are loaded into memory for analyzing consequences for caches (default: 1000) -- Automatic creation of default `application.yml` and gradle tasks for generation of markdown documentation -- Added endpoints for supporting easynav graph visualizations - - search and resource listing via `/api/search` - - managing of persisted visualisations via `/api/navigate` endpoints -- Added provisioning of jinja templates with provided substitution map for endpoint `/api/custom/{slug}` -- Added property `proxy.descriptionProperties` (analogous to `proxy.labelProperties`) for defining search relevant description properties -- Extend query monitor - - Added fields per entry - - `timeout`: value in ms of the query/update timeout - - `timedOut`: boolean value on whether the query timed out or not - - `cancelled`: boolean value on whether the query has been cancelled manually - - `running`: boolean value on whether the query is currently still being executed - - `affectedGraphs`: on successfully finished query/update the affected graphs 
are shown (if possible to determine) - - Added property for memory bound consumption in MB for query monitor list - - `proxy.queryMonitorMaxMemoryInMb` (Default: 30) - - Added fields to prometheus metrics endpoint - - `querymonitor_memoryusage_total`: memory usage of query queue in MB - - `querymonitor_queuesize_total`: query queue size -- Extend actuator info endpoint with store backend properties, `/actuator/info`: - - fields under store: - - `type`: same as `store.type` property (MEMORY, HTTP, GRAPHDB, STARDOG, VIRTUOSO, NEPTUNE) - - `version`: if possible / otherwise UNKNOWN - - `host`: if applicable otherwise N/A - - `repository`: if applicable otherwise N/A - - `user`: if applicable otherwise N/A -- Add non-transactional git sync of graph changes - - graphs can be configured via graph configuration for bi-directional git sync - - cf. config properties under `gitSync.*` +- Added support for manual query/update cancellation: + - active for graphdb, stardog, neptune + - DELETE `/api/admin/currentQueries/{queryId}` + - Neptune updates cannot be cancelled because queryId header not processed +- Added support for creation of configured graphdb repository on DP startup + - `store.graphdb.createRepositoryOnStartup`: Flag if repository shall be created on startup (default: false) +- Added support for selective invalidation of caches (graph list, shapes) via Update parsing / GraphDb Change Tracking + - `proxy.cache-selective-invalidation`: true if activated, false otherwise full flush on every write (default: true) + - `store.graphdb.graphDbChangeTrackingActive`: Whether change tracking for updates is active - better results for cache invalidation (default: true) + - `store.graphdb.graphDbChangeTrackingMaxQuadMemory`: Amount of quads as a result of an update which are loaded into memory for analyzing consequences for caches (default: 1000) +- Automatic creation of default `application.yml` and gradle tasks for generation of markdown documentation +- Added endpoints for 
supporting easynav graph visualizations + - search and resource listing via `/api/search` + - managing of persisted visualisations via `/api/navigate` endpoints +- Added provisioning of jinja templates with provided substitution map for endpoint `/api/custom/{slug}` +- Added property `proxy.descriptionProperties` (analogous to `proxy.labelProperties`) for defining search relevant description properties +- Extend query monitor + - Added fields per entry + - `timeout`: value in ms of the query/update timeout + - `timedOut`: boolean value on whether the query timed out or not + - `cancelled`: boolean value on whether the query has been cancelled manually + - `running`: boolean value on whether the query is currently still being executed + - `affectedGraphs`: on successfully finished query/update the affected graphs are shown (if possible to determine) + - Added property for memory bound consumption in MB for query monitor list + - `proxy.queryMonitorMaxMemoryInMb` (Default: 30) + - Added fields to prometheus metrics endpoint + - `querymonitor_memoryusage_total`: memory usage of query queue in MB + - `querymonitor_queuesize_total`: query queue size +- Extend actuator info endpoint with store backend properties, `/actuator/info`: + - fields under store: + - `type`: same as `store.type` property (MEMORY, HTTP, GRAPHDB, STARDOG, VIRTUOSO, NEPTUNE) + - `version`: if possible / otherwise UNKNOWN + - `host`: if applicable otherwise N/A + - `repository`: if applicable otherwise N/A + - `user`: if applicable otherwise N/A +- Add non-transactional git sync of graph changes + - graphs can be configured via graph configuration for bi-directional git sync + - cf. 
config properties under `gitSync.*` In addition to that, these changes and fixes are included in v22.2 of eccence DataPlatform: -- New store configuration properties, see below for migration notes -- Changed property for defining select query for graphList - - setting is store dependant and not valid for some stores - - property `proxy.graphListQuery` (`proxy.graph_list_query`) moved to store settings: - - `store.stardog.graphListQuery` - - `store.neptune.graphListQuery` -- Changed property for scheduled cache invalidation - - `proxy.cacheInvalidationCron`: Spring boot cron entry cf. (default: `* */30 * * * *`) - - [https://docs.spring.io/spring-framework/docs/current/reference/html/integration.html#scheduling-cron-expression](https://docs.spring.io/spring-framework/docs/current/reference/html/integration.html#scheduling-cron-expression) -- Library updates including Spring Boot / Stardog -- Changed property for DP query system timeout - - `proxy.queryTimeoutGeneral` -> `store.queryTimeoutGeneral` in ISO 8601 duration format (default: `PT1H`) -- Changed loading of model entities i.e. shapes cache - - load model entities using GSP requests instead of construct queries - - Changed property for base IRI: `files.defaultBaseIri` to `proxy.defaultBaseIri` (default: `http://localhost/`) +- New store configuration properties, see below for migration notes +- Changed property for defining select query for graphList + - setting is store dependant and not valid for some stores + - property `proxy.graphListQuery` (`proxy.graph_list_query`) moved to store settings: + - `store.stardog.graphListQuery` + - `store.neptune.graphListQuery` +- Changed property for scheduled cache invalidation + - `proxy.cacheInvalidationCron`: Spring boot cron entry cf. 
(default: `* */30 * * * *`) + - [https://docs.spring.io/spring-framework/docs/current/reference/html/integration.html#scheduling-cron-expression](https://docs.spring.io/spring-framework/docs/current/reference/html/integration.html#scheduling-cron-expression) +- Library updates including Spring Boot / Stardog +- Changed property for DP query system timeout + - `proxy.queryTimeoutGeneral` -> `store.queryTimeoutGeneral` in ISO 8601 duration format (default: `PT1H`) +- Changed loading of model entities i.e. shapes cache + - load model entities using GSP requests instead of construct queries + - Changed property for base IRI: `files.defaultBaseIri` to `proxy.defaultBaseIri` (default: `http://localhost/`) The following functionalities have been discontinued: -- Support for provisioned store authorization -- Command line options create-config, update-war -- WAR build target and support for WAR servlet deployment -- Property for DP query system timeout check interval - - `proxy.queryTimeoutCheckCron` not necessary anymore -- Support for multiple endpoints +- Support for provisioned store authorization +- Command line options create-config, update-war +- WAR build target and support for WAR servlet deployment +- Property for DP query system timeout check interval + - `proxy.queryTimeoutCheckCron` not necessary anymore +- Support for multiple endpoints In addition to that, multiple performance and stability issues are addressed. @@ -242,75 +242,75 @@ In addition to that, multiple performance and stability issues are addressed. 
This version of cmemc adds the following new features: -- `project reload` command - - Reload all tasks of a project from the workspace provider -- `admin workspace python list-plugins` command - - New option `--package-id-only` to output only package IDs -- `admin workspace python install` command completion - - now also provides plugin packages published on pypi.org -- `query status` command - - New filter `query`: - - `graph` - List only queries which affected a certain graph (URL) - - `regex` - List only queries which query text matches a regular expression - - `trace-id` - List only queries which have the specified trace ID - - `user` - List only queries executed by the specified account (URL) - - New values for filter `status`: - - `cancelled`: List only queries which were cancelled - - `timeout`: List only queries which ran into a timeout -- `query cancel` command - - cancel a running query - this stops the execution in the backend - - Depending on the backend store, this will result in a broken result stream (stardog, neptune and virtuoso) or a valid result stream with incomplete results (graphdb) -- `dataset list`|`delete` commands - - New option `--filter` with the following concrete filter - - `project` - filter by project ID - - `regex` - filter by regular expression on the dataset label - - `tag` - filter by tag label - - `type` - filter by dataset type -- `workflow list` command - - New option `--filter` with the following concrete filter - - `project` - filter by project ID - - `regex` - filter by regular expression on the dataset label - - `tag` - filter by tag label - - `io` - filter by io type -- `admin status` command - - overall rewrite - - new table output - - new option `--raw` to output collected status / info values - - new option `--key` to output only specific values - - new option `--enforce-table` to enforce table output of `--key` -- `vocabular import` command - - new option `--namespace`: In case the imported vocabulary file does not 
include a preferred namespace prefix, you can manually add a namespace prefix -- `workflow io` command - - new flag `--autoconfig` / `--no-autoconfig` for input dataset auto configuration +- `project reload` command + - Reload all tasks of a project from the workspace provider +- `admin workspace python list-plugins` command + - New option `--package-id-only` to output only package IDs +- `admin workspace python install` command completion + - now also provides plugin packages published on pypi.org +- `query status` command + - New filter `query`: + - `graph` - List only queries which affected a certain graph (URL) + - `regex` - List only queries which query text matches a regular expression + - `trace-id` - List only queries which have the specified trace ID + - `user` - List only queries executed by the specified account (URL) + - New values for filter `status`: + - `cancelled`: List only queries which were cancelled + - `timeout`: List only queries which ran into a timeout +- `query cancel` command + - cancel a running query - this stops the execution in the backend + - Depending on the backend store, this will result in a broken result stream (stardog, neptune and virtuoso) or a valid result stream with incomplete results (graphdb) +- `dataset list`|`delete` commands + - New option `--filter` with the following concrete filter + - `project` - filter by project ID + - `regex` - filter by regular expression on the dataset label + - `tag` - filter by tag label + - `type` - filter by dataset type +- `workflow list` command + - New option `--filter` with the following concrete filter + - `project` - filter by project ID + - `regex` - filter by regular expression on the dataset label + - `tag` - filter by tag label + - `io` - filter by io type +- `admin status` command + - overall rewrite + - new table output + - new option `--raw` to output collected status / info values + - new option `--key` to output only specific values + - new option `--enforce-table` to 
enforce table output of `--key` +- `vocabulary import` command + - new option `--namespace`: In case the imported vocabulary file does not include a preferred namespace prefix, you can manually add a namespace prefix +- `workflow io` command + - new flag `--autoconfig` / `--no-autoconfig` for input dataset auto configuration In addition to that, these changes and fixes are included: -- `admin workspace python list-plugins` command - - Additionally outputs the Package ID -- `project import` command - - The project id is now optional when importing project files -- `admin status` command - - new table output (similar to the other tables) - - `status` filter with `error` value - - only execution errors are listed - - this specifically means no cancelled and timeouted queries (they have there own status now) -- Add pysocks dependency to cmempy - - This allows for using the `all_proxy` evironment variable -- `dataset list --raw` output - - output was not a JSON array and not filtered correctly -- cmempy get graph streams - - stream enabled -- `admin status` command - - command will now always return, even if a component is down +- `admin workspace python list-plugins` command + - Additionally outputs the Package ID +- `project import` command + - The project id is now optional when importing project files +- `admin status` command + - new table output (similar to the other tables) + - `status` filter with `error` value + - only execution errors are listed + - this specifically means no cancelled and timed-out queries (they have their own status now) +- Add pysocks dependency to cmempy + - This allows for using the `all_proxy` environment variable +- `dataset list --raw` output + - output was not a JSON array and not filtered correctly +- cmempy get graph streams + - stream enabled +- `admin status` command + - command will now always return, even if a component is down The following commands are discontinued: -- `admin bootstap` command - - was deprecated in 22.1, use 
`admin store bootstrap` command instead -- `admin showcase` command - - was deprecated in 22.1, use `admin store showcase` command instead -- `dataset list`|`delete` command - - `--project` option, use `--filter projext XXX` instead +- `admin bootstrap` command + - was deprecated in 22.1, use `admin store bootstrap` command instead +- `admin showcase` command + - was deprecated in 22.1, use `admin store showcase` command instead +- `dataset list`|`delete` command + - `--project` option, use `--filter project XXX` instead In addition to that, multiple performance and stability issues are addressed. @@ -326,7 +326,7 @@ In addition to that, multiple performance and stability issues are addressed. ### DataIntegration -- CSV attributes specified via the `properties` parameter had inconsistent encoding rules. For CSV datasets where the `properties` parameter is used this can lead to changed source paths. +- CSV attributes specified via the `properties` parameter had inconsistent encoding rules. For CSV datasets where the `properties` parameter is used this can lead to changed source paths. 
#### Python plugins @@ -334,78 +334,78 @@ Due to the added context classes, the signature of a number of functions has bee ##### WorkflowPlugin -- The execute function has a new parameter `context`: - - `def execute(self, inputs: Sequence[Entities], context: ExecutionContext)` +- The execute function has a new parameter `context`: + - `def execute(self, inputs: Sequence[Entities], context: ExecutionContext)` ##### ParameterType -- The `project_id` parameters of the label and the autocompletion functions have been replaced by the PluginContext: - - `def autocomplete(self, query_terms: list[str], context: PluginContext) -> list[Autocompletion]` - - `def label(self, value: str, context: PluginContext) -> Optional[str]` - - The project identifier can still be accessed via `context.project_id` -- The `fromString` function has a new parameter `context`: - - `def from_string(self, value: str, context: PluginContext) -> T` +- The `project_id` parameters of the label and the autocompletion functions have been replaced by the PluginContext: + - `def autocomplete(self, query_terms: list[str], context: PluginContext) -> list[Autocompletion]` + - `def label(self, value: str, context: PluginContext) -> Optional[str]` + - The project identifier can still be accessed via `context.project_id` +- The `fromString` function has a new parameter `context`: + - `def from_string(self, value: str, context: PluginContext) -> T` ### DataPlatform Due to the removed multiple endpoint support the store configuration properties have changed. Please revise your store configuration section(s) in your DataPlatform `application.yml`. 
The new configuration properties are: -- Type of store (general settings) - - `store.type`: MEMORY, HTTP, GRAPHDB, STARDOG, VIRTUOSO, NEPTUNE - - `store.authorization`: NONE, REWRITE_FROM -- MEMORY: - - `store.memory.files`: List of files loaded on startup -- HTTP: - - `store.http.queryEndpointUrl`: SPARQL Query endpoint (mandatory) - - `store.http.updateEndpointUrl`: SPARQL Update endpoint (mandatory) - - `store.http.graphStoreEndpointUrl`: SPARQL GSP endpoint (optional but highly recommended) - - `store.http.username`: Username (optional) - - `store.http.password`: Password (optional) -- GRAPHDB: - - `store.graphdb.host`: host of graphdb backend (i.e. localhost) - - `store.graphdb.port`: port of graphdb backend (i.e. 7200) - - `store.graphdb.ssl-enabled`: flag if ssl (https) is enabled (default: false) - - `store.graphdb.repository`: name of repository (i.e. cmem) - - `store.graphdb.username`: Username (optional) - - `store.graphdb.password`: Password (optional) - - `store.graphdb.useDirectTransfer`: flag if direct GSP endpoints of graphdb shall be used instead of workbench upload (default: true) - - `store.graphdb.importDirectory`: Import directory to be utilized in the "workbench import with shared folder" approach. - - `store.graphdb.graphDbChangeTrackingActive`: Whether change tracking for updates is active - better results for cache invalidation (default: true) - - `store.graphdb.graphDbChangeTrackingMaxQuadMemory`: Amount of quads as a result of an update which are loaded into memory for analyzing consequences for caches (default: 1000) -- STARDOG: - - `store.stardog.host`: host of stardog backend (i.e. localhost) - - `store.stardog.port`: port of stardog backend (i.e. 5820) - - `store.stardog.ssl-enabled`: flag if ssl (https) is enabled (default: false) - - `store.stardog.repository`: name of repository (i.e. 
cmem) - - `store.stardog.username`: Username (optional) - - `store.stardog.password`: Password (optional) - - `store.stardog.userPasswordSalt`: salt for generated user password (optional) - - `store.stardog.updateTimeoutInMilliseconds`: Timeout in ms for updates (default: 0 = deactivated) - - `store.stardog.graphListQuery`: Query for graph list - graph must be bound to variable ?g -- NEPTUNE: - - `store.neptune.host`: host of neptune backend (i.e. neptune-cluster123.eu-central-1.neptune.amazonaws.com) - - `store.neptune.port`: port of neptune backend (i.e. 8182) - - `store.neptune.graphListQuery`: Query for graph list - graph must be bound to variable ?g - - Settings under store.neptune.aws (mandatory): - - `store.neptune.aws.region`: AWS region where the configured neptune cluster is located (e.g. eu-central-1) - - `store.neptune.aws.authEnabled`: Flag on whether authentication is enabled on neptune cluster (default: true) - - Settings under `store.neptune.s3` for upload of large files (>150MB uncompressed) (optional): - - `store.neptune.s3.bucketNameOrAPAlias`: Name of bucket or access point for S3 bulk load - - `store.neptune.s3.iamRoleArn`: ARN of role under which neptune cluster loads from S3 - - `store.neptune.s3.bulkLoadThresholdInMb`: Load threshold in MB for GSP access, if graph data greater than S3 upload is used (default: 150) - - `store.neptune.s3.bulkLoadParallelism`: Degree of parallelism for neptune S3 bulk loader (LOW (default), MEDIUM, HIGH, OVERSUBSCRIBE) -- VIRTUOSO: - - `store.virtuoso.host`: host of virtuoso backend (i.e. localhost) - - `store.virtuoso.port`: http port of virtuoso backend (i.e. 8080) - - `store.virtuoso.databasePort`: database port of virtuoso backend (i.e. 
1111) - - `store.virtuoso.ssl-enabled`: flag if ssl (https) is enabled (default: false) - - `store.virtuoso.username`: Username (optional) - - `store.virtuoso.password`: Password (optional) +- Type of store (general settings) + - `store.type`: MEMORY, HTTP, GRAPHDB, STARDOG, VIRTUOSO, NEPTUNE + - `store.authorization`: NONE, REWRITE_FROM +- MEMORY: + - `store.memory.files`: List of files loaded on startup +- HTTP: + - `store.http.queryEndpointUrl`: SPARQL Query endpoint (mandatory) + - `store.http.updateEndpointUrl`: SPARQL Update endpoint (mandatory) + - `store.http.graphStoreEndpointUrl`: SPARQL GSP endpoint (optional but highly recommended) + - `store.http.username`: Username (optional) + - `store.http.password`: Password (optional) +- GRAPHDB: + - `store.graphdb.host`: host of graphdb backend (i.e. localhost) + - `store.graphdb.port`: port of graphdb backend (i.e. 7200) + - `store.graphdb.ssl-enabled`: flag if ssl (https) is enabled (default: false) + - `store.graphdb.repository`: name of repository (i.e. cmem) + - `store.graphdb.username`: Username (optional) + - `store.graphdb.password`: Password (optional) + - `store.graphdb.useDirectTransfer`: flag if direct GSP endpoints of graphdb shall be used instead of workbench upload (default: true) + - `store.graphdb.importDirectory`: Import directory to be utilized in the "workbench import with shared folder" approach. + - `store.graphdb.graphDbChangeTrackingActive`: Whether change tracking for updates is active - better results for cache invalidation (default: true) + - `store.graphdb.graphDbChangeTrackingMaxQuadMemory`: Amount of quads as a result of an update which are loaded into memory for analyzing consequences for caches (default: 1000) +- STARDOG: + - `store.stardog.host`: host of stardog backend (i.e. localhost) + - `store.stardog.port`: port of stardog backend (i.e. 
5820) + - `store.stardog.ssl-enabled`: flag if ssl (https) is enabled (default: false) + - `store.stardog.repository`: name of repository (i.e. cmem) + - `store.stardog.username`: Username (optional) + - `store.stardog.password`: Password (optional) + - `store.stardog.userPasswordSalt`: salt for generated user password (optional) + - `store.stardog.updateTimeoutInMilliseconds`: Timeout in ms for updates (default: 0 = deactivated) + - `store.stardog.graphListQuery`: Query for graph list - graph must be bound to variable ?g +- NEPTUNE: + - `store.neptune.host`: host of neptune backend (i.e. neptune-cluster123.eu-central-1.neptune.amazonaws.com) + - `store.neptune.port`: port of neptune backend (i.e. 8182) + - `store.neptune.graphListQuery`: Query for graph list - graph must be bound to variable ?g + - Settings under store.neptune.aws (mandatory): + - `store.neptune.aws.region`: AWS region where the configured neptune cluster is located (e.g. eu-central-1) + - `store.neptune.aws.authEnabled`: Flag on whether authentication is enabled on neptune cluster (default: true) + - Settings under `store.neptune.s3` for upload of large files (>150MB uncompressed) (optional): + - `store.neptune.s3.bucketNameOrAPAlias`: Name of bucket or access point for S3 bulk load + - `store.neptune.s3.iamRoleArn`: ARN of role under which neptune cluster loads from S3 + - `store.neptune.s3.bulkLoadThresholdInMb`: Load threshold in MB for GSP access, if graph data greater than S3 upload is used (default: 150) + - `store.neptune.s3.bulkLoadParallelism`: Degree of parallelism for neptune S3 bulk loader (LOW (default), MEDIUM, HIGH, OVERSUBSCRIBE) +- VIRTUOSO: + - `store.virtuoso.host`: host of virtuoso backend (i.e. localhost) + - `store.virtuoso.port`: http port of virtuoso backend (i.e. 8080) + - `store.virtuoso.databasePort`: database port of virtuoso backend (i.e. 
1111) + - `store.virtuoso.ssl-enabled`: flag if ssl (https) is enabled (default: false) + - `store.virtuoso.username`: Username (optional) + - `store.virtuoso.password`: Password (optional) ### cmemc -- `dataset list`|`delete command` - - option `--project` is removed - - Please use `--filter project XXX` instead -- `admin status` command - - in case you piped the normal output of this command and reacted on that, you need to use the `--key` command now +- `dataset list`|`delete command` + - option `--project` is removed + - Please use `--filter project XXX` instead +- `admin status` command + - in case you piped the normal output of this command and reacted on that, you need to use the `--key` command now diff --git a/docs/release-notes/corporate-memory-23-1/index.md b/docs/release-notes/corporate-memory-23-1/index.md index 405e14293..a8eaaee63 100644 --- a/docs/release-notes/corporate-memory-23-1/index.md +++ b/docs/release-notes/corporate-memory-23-1/index.md @@ -12,14 +12,14 @@ Corporate Memory 23.1.3 is the second patch release in the 23.1 release line. The highlights of this release are: -- Build: - - Support for **global variables** in dataset and task parameters. - - Extensions to the Python Plugin API, including **autocompleted parameter** type and **password parameter** type. -- Explore: - - Workspaces are now **selectable at runtime**. - - Enhanced **editing capabilities** in the EasyNav editor. -- Automate: - - New **`admin user` command group** for managing user accounts in the Keycloak CMEM realm. +- Build: + - Support for **global variables** in dataset and task parameters. + - Extensions to the Python Plugin API, including **autocompleted parameter** type and **password parameter** type. +- Explore: + - Workspaces are now **selectable at runtime**. + - Enhanced **editing capabilities** in the EasyNav editor. +- Automate: + - New **`admin user` command group** for managing user accounts in the Keycloak CMEM realm. !!! 
warning @@ -27,11 +27,11 @@ The highlights of this release are: This release delivers the following component versions: -- eccenca DataPlatform v23.1.3 -- eccenca DataIntegration v23.1.2 -- eccenca DataIntegration Python Plugins v3.0.0 -- eccenca DataManager v23.1.5 -- eccenca Corporate Memory Control (cmemc) v23.1.3 +- eccenca DataPlatform v23.1.3 +- eccenca DataIntegration v23.1.2 +- eccenca DataIntegration Python Plugins v3.0.0 +- eccenca DataManager v23.1.5 +- eccenca Corporate Memory Control (cmemc) v23.1.3 More detailed release notes for these versions are listed below. @@ -41,97 +41,97 @@ We're excited to bring you the latest update to DataIntegration v23.1, featuring v23.1.2 of eccenca DataIntegration ships following fixes: -- Saving a transform or linking rule with an operator that references a project resource fails. -- Cannot read large Excel files from S3. +- Saving a transform or linking rule with an operator that references a project resource fails. +- Cannot read large Excel files from S3. v23.1.1 of eccenca DataIntegration ships following fixes: -- Fixed various vulnerabilities by upgrading affected libraries. -- Workflows using the "SPARQL Update query" operator fail with "Need non-empty resource manager" errors. -- use cmem-plugin-base 3.1.0 instead of RC1 -- remove some unused base image packages +- Fixed various vulnerabilities by upgrading affected libraries. +- Workflows using the "SPARQL Update query" operator fail with "Need non-empty resource manager" errors. +- use cmem-plugin-base 3.1.0 instead of RC1 +- remove some unused base image packages v23.1 of eccenca DataIntegration adds the following new features: -- Support for global variables: - - Dataset and task parameters can be set to Jinja templates. - - Templates may access configured global variables. User-defined variables will be added later. - - Global variable resolution is supported by the 'Evaluate template' transform operator. - - Disabled by default. 
-- Extensions to the Python Plugin API: - - Autocompleted parameter types may declare dependent parameters. - - Password plugin parameter type. - - Custom parameter types can be registered - - For details, see changelog of the cmem-plugin-base module. -- REST endpoint to search for properties in the global vocabulary cache: - - GET /api/workspace/vocabularies/property/search - - Warn of invisible characters in input fields and offer action to remove them from the input string. -- Autocompletion of graph parameters. -- Auto-completion support to linking rule 'link type' parameter. -- Improve handling of replaceable datasets: - - Datasets that can be replaced/configured in a workflow at API request time can be set in the workflow editor. - - This allows for the execution of workflows with mock data, which has not been possible with 'Variable dataset' tasks. -- Allow to config datasets as read-only to prevent accidentally writing into them. -- New resource endpoints to replace the deprecated resource endpoints. See deprecation section for more details. -- Allow to force start activity. -- Rewritten linking evaluation view. +- Support for global variables: + - Dataset and task parameters can be set to Jinja templates. + - Templates may access configured global variables. User-defined variables will be added later. + - Global variable resolution is supported by the 'Evaluate template' transform operator. + - Disabled by default. +- Extensions to the Python Plugin API: + - Autocompleted parameter types may declare dependent parameters. + - Password plugin parameter type. + - Custom parameter types can be registered + - For details, see changelog of the cmem-plugin-base module. +- REST endpoint to search for properties in the global vocabulary cache: + - GET /api/workspace/vocabularies/property/search + - Warn of invisible characters in input fields and offer action to remove them from the input string. +- Autocompletion of graph parameters. 
+- Auto-completion support to linking rule 'link type' parameter. +- Improve handling of replaceable datasets: + - Datasets that can be replaced/configured in a workflow at API request time can be set in the workflow editor. + - This allows for the execution of workflows with mock data, which has not been possible with 'Variable dataset' tasks. +- Allow to config datasets as read-only to prevent accidentally writing into them. +- New resource endpoints to replace the deprecated resource endpoints. See deprecation section for more details. +- Allow to force start activity. +- Rewritten linking evaluation view. v23.1 of eccenca DataIntegration introduces the following changes: -- Check token expiration (> 5s left) before sending a request to prevent unnecessary request retries. -- 'Concatenate' and 'Concatenate multiple values' transformer: - - In 'glue' parameter value support `\t`, `\n` and `\\` as escaped characters. -- Indexing of levenshtein comparisons can be configured now. -- Rename 'Constant' comparison operator to 'Constant similarity value'. -- Neo4j improvements: - - Support for paths when reading entities (forward and backward operators). - - Using a relation at the end of a path will return the URI of the node. - - The `#id` special path will return the internal node id. -- CSV dataset auto-configuration now supports detecting more encodings for the Charset parameter. -- Changed search behavior in most places to search after typing stops instead of needing to hit the ENTER key: - - In the 'Create new item' dialog hitting the Enter key now has the same effect as clicking the 'Add' button. -- Show value type label primarily instead of ID. -- Show default URI pattern example in a object rule mapping form when the source path is non-empty. -- Response body of a failed REST operator request is also added to the workflow report in addition to being logged. 
-- Linking execution report has a warning message when the link limit was reduced because of the config of `linking.execution.linkLimit.max`. -- Disable streaming in 'Parse JSON' operator, so backward paths can be used against it. -- Improved online documentation of many rule operators: - - Distance measures: Added information if a measure is either boolean, normalized or unbounded. - - Distance measures: Clarified what happens with multiple values for single value measures. - - Transformers, Distance measures and Aggregators: Added examples +- Check token expiration (> 5s left) before sending a request to prevent unnecessary request retries. +- 'Concatenate' and 'Concatenate multiple values' transformer: + - In 'glue' parameter value support `\t`, `\n` and `\\` as escaped characters. +- Indexing of levenshtein comparisons can be configured now. +- Rename 'Constant' comparison operator to 'Constant similarity value'. +- Neo4j improvements: + - Support for paths when reading entities (forward and backward operators). + - Using a relation at the end of a path will return the URI of the node. + - The `#id` special path will return the internal node id. +- CSV dataset auto-configuration now supports detecting more encodings for the Charset parameter. +- Changed search behavior in most places to search after typing stops instead of needing to hit the ENTER key: + - In the 'Create new item' dialog hitting the Enter key now has the same effect as clicking the 'Add' button. +- Show value type label primarily instead of ID. +- Show default URI pattern example in a object rule mapping form when the source path is non-empty. +- Response body of a failed REST operator request is also added to the workflow report in addition to being logged. +- Linking execution report has a warning message when the link limit was reduced because of the config of `linking.execution.linkLimit.max`. +- Disable streaming in 'Parse JSON' operator, so backward paths can be used against it. 
+- Improved online documentation of many rule operators: + - Distance measures: Added information if a measure is either boolean, normalized or unbounded. + - Distance measures: Clarified what happens with multiple values for single value measures. + - Transformers, Distance measures and Aggregators: Added examples v23.1 of eccenca DataIntegration ships following fixes: -- Layout breaks on small screens on detail pages of the workspace. -- Mapping suggestion list is empty when there is no matching response even though source paths exist. -- Active Learning shows incorrect entity values. -- Add notes dialog keeps focus when workflow is executed and running. -- Race condition in project/task tag selection. -- Dataset auto-configure parameter changes not set for parameters that support auto-completion. -- Label and description of existing root/object rules cannot be changed. -- DI writes invalid XML, if the last segment of a URI starts with a number. -- Optimize peak endpoint if only one path is requested. -- Python Plugin Environment: package dependencies can not update the base requirements anymore. -- Spinner is being shown eternally when no comparison pairs have been found in the link learning. -- Value path auto-completion can suggest wrong paths if backward paths exist in the paths cache. -- Show spinner while transform examples are requested from the backend. -- Abort a not fully consumed S3 input stream instead of closing it which leads to warnings. -- Date parser fails when no input/output pattern is selected even though an alternative input/output pattern is given. -- Dependent parameter auto-completion using default values of other parameters. -- Support replaceable/variable datasets in nested workflows. -- Display info message when a parameter is disabled because it depends on other parameters to be set. -- 'Fix URI' operator trims the URI before fixing it and tries better to maintain the original URI with only the invalid characters encoded. 
-- Task completion message is shown without executing the transformation. -- Evaluation in mapping rule editor does not work when inside object mappings. -- Show error message when project import fails because of errors detected in the backend instead of closing the project import modal. -- Linking editor evaluation toolbar component issues. -- Levensthein indexing slow if combined conjunctively. -- Transform execution tab layout issues. +- Layout breaks on small screens on detail pages of the workspace. +- Mapping suggestion list is empty when there is no matching response even though source paths exist. +- Active Learning shows incorrect entity values. +- Add notes dialog keeps focus when workflow is executed and running. +- Race condition in project/task tag selection. +- Dataset auto-configure parameter changes not set for parameters that support auto-completion. +- Label and description of existing root/object rules cannot be changed. +- DI writes invalid XML, if the last segment of a URI starts with a number. +- Optimize peak endpoint if only one path is requested. +- Python Plugin Environment: package dependencies can not update the base requirements anymore. +- Spinner is being shown eternally when no comparison pairs have been found in the link learning. +- Value path auto-completion can suggest wrong paths if backward paths exist in the paths cache. +- Show spinner while transform examples are requested from the backend. +- Abort a not fully consumed S3 input stream instead of closing it which leads to warnings. +- Date parser fails when no input/output pattern is selected even though an alternative input/output pattern is given. +- Dependent parameter auto-completion using default values of other parameters. +- Support replaceable/variable datasets in nested workflows. +- Display info message when a parameter is disabled because it depends on other parameters to be set. 
+- 'Fix URI' operator trims the URI before fixing it and tries better to maintain the original URI with only the invalid characters encoded. +- Task completion message is shown without executing the transformation. +- Evaluation in mapping rule editor does not work when inside object mappings. +- Show error message when project import fails because of errors detected in the backend instead of closing the project import modal. +- Linking editor evaluation toolbar component issues. +- Levenshtein indexing slow if combined conjunctively. +- Transform execution tab layout issues. v23.1 of eccenca DataIntegration introduced the following deprecations: -- Resource endpoints: - - All resources endpoints that have the _file path_ (`workspace/projects/:project/resources/:name`) encoded in the URL path are now deprecated. The files endpoints using a _query parameter_ for the path should be used now. +- Resource endpoints: + - All resources endpoints that have the _file path_ (`workspace/projects/:project/resources/:name`) encoded in the URL path are now deprecated. The files endpoints using a _query parameter_ for the path should be used now. ## eccenca DataIntegration Python Plugins v3.0.0 @@ -139,7 +139,7 @@ Corporate Memory v23.1 includes the DataIntegration Python Plugins support in ve v3.0.0 of eccenca DataIntegration Python Plugins adds the following new features: -- Autocompleted parameter types may declare dependent parameters. For instance, a parameter `city` may declare that its completed values depend on another parameter 'country': +- Autocompleted parameter types may declare dependent parameters. For instance, a parameter `city` may declare that its completed values depend on another parameter 'country': ```py class CityParameterType(StringParameterType): @@ -152,7 +152,7 @@ v3.0.0 of eccenca DataIntegration Python Plugins adds the following new features return ... ``` -- Password plugin parameter type. 
Passwords will be encrypted in the backend and not shown to users: +- Password plugin parameter type. Passwords will be encrypted in the backend and not shown to users: ```py @Plugin(label="My Plugin") @@ -164,7 +164,7 @@ v3.0.0 of eccenca DataIntegration Python Plugins adds the following new features self.password.decrypt() ``` -- Custom parameter types can be registered. See implementation of `PasswordParameterType` for an example. +- Custom parameter types can be registered. See implementation of `PasswordParameterType` for an example. ## eccenca DataManager v23.1.5 @@ -172,41 +172,41 @@ We are excited to announce the latest update to DataManager v23.1, which introdu v23.1.5 of eccenca DataManager ships following fixes: -- Fixed download of query result in query editor. -- Setting the defaultGraph of a explore workspace configuration no longer prevents the Navigation box from loading. -- Fixes in the LinkRules modules: Result-Details, Rule-Deletions, Property-Search +- Fixed download of query result in query editor. +- Setting the defaultGraph of an explore workspace configuration no longer prevents the Navigation box from loading. +- Fixes in the LinkRules modules: Result-Details, Rule-Deletions, Property-Search v23.1.4 of eccenca DataManager ships following changes: -- Switch from iframe to redirect based login view. - - Known issues: Interactions after the timeout do not always trigger a reload and simply shows error messages or empty results. Using the navigation bar triggers a reload. +- Switch from iframe to redirect based login view. + - Known issues: Interactions after the timeout do not always trigger a reload and simply shows error messages or empty results. Using the navigation bar triggers a reload. 
v23.1.3 of eccenca DataManager ships following fixes: -- use latest debian:bullseye-20230411-slim base image -- use wget instead of curl +- use latest debian:bullseye-20230411-slim base image +- use wget instead of curl v23.1.2 of eccenca DataManager was a redacted build due to incomplete merge. v23.1.1 of eccenca DataManager ships following fixes: -- Fixes link rules creation dialogue setting a target property. +- Fixes link rules creation dialogue setting a target property. v23.1 of eccenca DataManager adds the following new features: -- Workspaces are selectable at runtime. -- Routes can include a workspace selection. -- Added Editing capabilities to the EasyNav editor. +- Workspaces are selectable at runtime. +- Routes can include a workspace selection. +- Added Editing capabilities to the EasyNav editor. v23.1 of eccenca DataManager introduces the following changes: -- Configuration is now fully retrieved from DataPlatform, the included Spring Boot based backend is solely delivering the javascript frontend. -- The configuration can be changed at runtime using a frontend in the `/admin` Module. Changes are visible with the next full browser reload. -- Authentication is now based on the OAuth2 Code Flow. +- Configuration is now fully retrieved from DataPlatform, the included Spring Boot based backend is solely delivering the javascript frontend. +- The configuration can be changed at runtime using a frontend in the `/admin` Module. Changes are visible with the next full browser reload. +- Authentication is now based on the OAuth2 Code Flow. v23.1 of eccenca DataManager ships following fixes: -- Removed session token from URL. +- Removed session token from URL. ## eccenca DataPlatform v23.1.3 @@ -214,50 +214,50 @@ We're excited to announce the latest update to DataPlatform v23.1, featuring sig v23.1.3 of eccenca DataPlatform ships following fixes: -- Fix wrong calculation of write graph access under certain conditions. 
+- Fix wrong calculation of write graph access under certain conditions. v23.1.2 of eccenca DataPlatform ships following changes: -- DP/Infinispan: session timeout increased to 10h -- Login: switch from iframe to redirect based flow +- DP/Infinispan: session timeout increased to 10h +- Login: switch from iframe to redirect based flow v23.1.1 of eccenca DataPlatform ships following fixes: -- docker image: use latest debian:bullseye-20230411-slim base image -- docker image: wget instead of curl +- docker image: use latest debian:bullseye-20230411-slim base image +- docker image: wget instead of curl v23.1 of eccenca DataPlatform adds the following new features: -- Added ability to use dynamic access conditions -- Added graph for infos about logged in users (iri, login): - - Can be (de)activated using property `authorization.userInfoGraph.active` (default: true) -- Workspace Selection and Configuration: - - Activate OAuth 2.0 client role permanently - - Redirect login page to (exactly) one configured resource provider - - REST endpoints for workspace configuration +- Added ability to use dynamic access conditions +- Added graph for infos about logged in users (iri, login): + - Can be (de)activated using property `authorization.userInfoGraph.active` (default: true) +- Workspace Selection and Configuration: + - Activate OAuth 2.0 client role permanently + - Redirect login page to (exactly) one configured resource provider + - REST endpoints for workspace configuration v23.1 of eccenca DataPlatform introduces the following changes: -- Integrate infinispan as sole cache provider: - - Enables clustering of DataPlatform instances - - clustering can be activated by `spring.cache.infinispan.mode=CLUSTER` - - Removed property `files.maintenanceCron` (housekeeping done by infinispan) - - Added property `files.storageDirectory` for configuring shared directory between multiple DataPlatform instances - - Replaced property `proxy.cacheInvalidationCron` with `proxy.cacheExpiration` 
(no scheduled flush anymore but cache expiration as default) -- Changed logic of resolving user rights through access conditions - performance optimized +- Integrate infinispan as sole cache provider: + - Enables clustering of DataPlatform instances + - clustering can be activated by `spring.cache.infinispan.mode=CLUSTER` + - Removed property `files.maintenanceCron` (housekeeping done by infinispan) + - Added property `files.storageDirectory` for configuring shared directory between multiple DataPlatform instances + - Replaced property `proxy.cacheInvalidationCron` with `proxy.cacheExpiration` (no scheduled flush anymore but cache expiration as default) +- Changed logic of resolving user rights through access conditions - performance optimized v23.1 of eccenca DataPlatform ships following fixes: -- Prevent injection of formulas in Excel/CSV exports -- Diagnostic store operations / query rewrite log on logging topic `com.eccenca.elds.backend.sparql.query.diagnostic` - must be set to TRACE: - - Activated update result statistics in existing query result logger -- Missing access condition action resource for EasyNav added +- Prevent injection of formulas in Excel/CSV exports +- Diagnostic store operations / query rewrite log on logging topic `com.eccenca.elds.backend.sparql.query.diagnostic` - must be set to TRACE: + - Activated update result statistics in existing query result logger +- Missing access condition action resource for EasyNav added v23.1 of eccenca DataPlatform removed the following features and configurations: -- Deprecated properties under authorization.accessConditions - - `authorization.accessConditions.graph`: used graph is always the default graph from bootstrap - - `authorization.accessConditions.url`: url as source for access condition not supported anymore +- Deprecated properties under authorization.accessConditions + - `authorization.accessConditions.graph`: used graph is always the default graph from bootstrap + - 
`authorization.accessConditions.url`: url as source for access condition not supported anymore ## eccenca Corporate Memory Control (cmemc) v23.1.3 @@ -271,34 +271,34 @@ v23.1.3 of eccenca Corporate Memory Control introduces the following security up v23.1.2 of eccenca Corporate Memory Control introduces the following fixes: -- broken installation due to `urllib3` dependency +- broken installation due to `urllib3` dependency - `urllib3>=2` was released 2023-04-26 but is broken with this error: `ImportError: cannot import name 'appengine' from 'urllib3.contrib'` - cmemc requested any version and not `^1.26.15` of this library, which resulted in broken installations with pip beginning from 2023-04-26 - quick fix to solve this without updating cmemc: `pip install urllib3==1.26.15` in the cmemc virtual env v23.1.1 of eccenca Corporate Memory Control introduces the following changes: -- remove some unneeded packages from docker image -- switch to python 3.11.3 base image and tests +- remove some unneeded packages from docker image +- switch to python 3.11.3 base image and tests v23.1 of eccenca Corporate Memory Control adds the following new features: -- `admin status` command: - - option `--exit-1` to specify, when to return non-zero exit code - - currently set to `never`, this will be changed to `always` in the future -- `admin user` command group: - - `create` command - add a user account to the keycloak CMEM realm - - `delete` command - remove a user account from the keycloak CMEM realm - - `list` command - list user accounts in the keycloak CMEM realm - - `password` command - change the accounts password - - `update` command - change a user account in the keycloak CMEM realm -- optional `KEYCLOAK_BASE_URI` config environment -- optional `KEYCLOAK_REALM_ID` config environment +- `admin status` command: + - option `--exit-1` to specify, when to return non-zero exit code + - currently set to `never`, this will be changed to `always` in the future +- `admin user` 
command group: + - `create` command - add a user account to the keycloak CMEM realm + - `delete` command - remove a user account from the keycloak CMEM realm + - `list` command - list user accounts in the keycloak CMEM realm + - `password` command - change the accounts password + - `update` command - change a user account in the keycloak CMEM realm +- optional `KEYCLOAK_BASE_URI` config environment +- optional `KEYCLOAK_REALM_ID` config environment v23.1 of eccenca Corporate Memory Control introduced the following deprecations: -- `admin status` command `--exit-1` option default - - currently set to `never`, this will be changed to `always` in a future release +- `admin status` command `--exit-1` option default + - currently set to `never`, this will be changed to `always` in a future release ## Migration Notes @@ -312,9 +312,9 @@ v23.1 of eccenca Corporate Memory Control introduced the following deprecations: ### eccenca DataIntegration -- Resource endpoints: - - All resources endpoints that have the _file path_ (`workspace/projects/:project/resources/:name`) encoded in the URL path are now deprecated. - - Use corresponding endpoints starting with `workspace/projects/:project/files` instead, using a _query parameter_ for the file path. +- Resource endpoints: + - All resources endpoints that have the _file path_ (`workspace/projects/:project/resources/:name`) encoded in the URL path are now deprecated. + - Use corresponding endpoints starting with `workspace/projects/:project/files` instead, using a _query parameter_ for the file path. ### eccenca DataIntegration Python Plugins @@ -335,10 +335,10 @@ def label(self, value: str, depend_on_parameter_values: list[Any], context: Plug ### eccenca DataManager -- A manual migration for the graph based configuration of the EasyNav configuration and the graph list configuration of the explore module is necessary. -- A manual migration for the `.yml` based DataManager configuration is necessary. 
-- The new web based configuration tool can be used to migrate, create and manage your DataManager (workspace) configuration +- A manual migration for the graph based configuration of the EasyNav configuration and the graph list configuration of the explore module is necessary. +- A manual migration for the `.yml` based DataManager configuration is necessary. +- The new web based configuration tool can be used to migrate, create and manage your DataManager (workspace) configuration ### eccenca DataPlatform -- Deprecated properties under `authorization.accessConditions` have been removed. The used graph is always the default graph from bootstrap, and URL as a source for access conditions is not supported anymore. +- Deprecated properties under `authorization.accessConditions` have been removed. The used graph is always the default graph from bootstrap, and URL as a source for access conditions is not supported anymore. diff --git a/docs/release-notes/corporate-memory-23-2/index.md b/docs/release-notes/corporate-memory-23-2/index.md index 9d3e49bc9..7da431060 100644 --- a/docs/release-notes/corporate-memory-23-2/index.md +++ b/docs/release-notes/corporate-memory-23-2/index.md @@ -12,25 +12,25 @@ Corporate Memory 23.2.1 is the first patch release in the 23.2 release line. The highlights of this release are: -- Build: - - Support for user managed **project variables** in dataset and task parameters. - - All new UIs for **transformation evaluation and reference links**. -- Explore: - - New feature in Easynav like - - **nodes context menu**, - - **long label support**, - - advanced **graph selection dialog** and - - **automatic node layout**. -- Automate: - - New **`admin client` command group** for managing client accounts in the Keycloak CMEM realm. +- Build: + - Support for user managed **project variables** in dataset and task parameters. + - All new UIs for **transformation evaluation and reference links**. 
+- Explore: + - New feature in Easynav like + - **nodes context menu**, + - **long label support**, + - advanced **graph selection dialog** and + - **automatic node layout**. +- Automate: + - New **`admin client` command group** for managing client accounts in the Keycloak CMEM realm. This release delivers the following component versions: -- eccenca DataPlatform v23.2.1 -- eccenca DataIntegration v23.2.1 -- eccenca DataIntegration Python Plugins v4.1.0 -- eccenca DataManager v23.2 -- eccenca Corporate Memory Control (cmemc) v23.2 +- eccenca DataPlatform v23.2.1 +- eccenca DataIntegration v23.2.1 +- eccenca DataIntegration Python Plugins v4.1.0 +- eccenca DataManager v23.2 +- eccenca Corporate Memory Control (cmemc) v23.2 More detailed release notes for these versions are listed below. @@ -40,70 +40,70 @@ We're excited to bring you the latest update to DataIntegration v23.2, featuring v23.2.1 of eccenca DataIntegration ships the following improvements and fixes: -- Added overview listing all available keyboard shortcuts, available by `?` key, or from the user menu. -- Improved vocabulary (needs to be installed separately). -- The file API will set the content type based on the file extension: - - For instance, for a file ending in `.json` the `Content-Type` header will be set to `application/json`. -- Upgraded several libraries to fix vulnerabilities. +- Added overview listing all available keyboard shortcuts, available by `?` key, or from the user menu. +- Improved vocabulary (needs to be installed separately). +- The file API will set the content type based on the file extension: + - For instance, for a file ending in `.json` the `Content-Type` header will be set to `application/json`. +- Upgraded several libraries to fix vulnerabilities. v23.2 of eccenca DataIntegration adds the following new features: -- User-defined project variables: - - Can be used in dataset and task parameters and in the template transform operator. 
- - Variables may use templates that access other preceding project variables or globally configured variables. -- All new transform evaluation UI. -- All new reference links view. -- Extensions to transform rule and linking rule editors: - - Support for setting a language filter for a path operator conveniently without having to use the language filter syntax. - - Partial linking and transform rule (tree) evaluation. -- Support fixing tasks that have failed loading: - - Allow a user to reload a task. The user may change the original parameters of the task. - - `POST /api/workspace/projects/{projectId}/reloadFailedTask` endpoint that reloads a task with optionally updated parameter values. - - `GET /api/workspace/projects/{projectId}/failedTaskParameters/{taskId}` endpoint that fetches the original parameter values of a failed task. -- Added "Concatenate pairwise" transform operator. -- Make transform suggestion matching link spec / workflow customizable. -- API extensions: - - Transform evaluation endpoint: `/transform/tasks/{projectId}/{transformTaskId}/rule/{ruleId}/evaluated` - - Added endpoints for uploading and downloading files of datasets. - - REST endpoint to fetch dataset characteristics. - - `GET /api/workspace/projects/(projectId)/datasets/{datasetId}/characteristics` - - API endpoint to fetch IDs of file based datasets. - - `/api/core/datasets/resourceBased` - - REST endpoint to copy an arbitrary linking task to the matching linking task that will be used in the mapping suggestion. - - `POST /ontologyMatching/replaceOntologyMatchingLinkSpec` - - Both `matching.external.projectId` and `matching.external.linkSpecId` must be configured - - REST endpoint that generates an ontology matching project and linking tasks based on a specific transformation task. - - `POST /ontologyMatching/generateMatchingLinkRule` +- User-defined project variables: + - Can be used in dataset and task parameters and in the template transform operator. 
+ - Variables may use templates that access other preceding project variables or globally configured variables. +- All new transform evaluation UI. +- All new reference links view. +- Extensions to transform rule and linking rule editors: + - Support for setting a language filter for a path operator conveniently without having to use the language filter syntax. + - Partial linking and transform rule (tree) evaluation. +- Support fixing tasks that have failed loading: + - Allow a user to reload a task. The user may change the original parameters of the task. + - `POST /api/workspace/projects/{projectId}/reloadFailedTask` endpoint that reloads a task with optionally updated parameter values. + - `GET /api/workspace/projects/{projectId}/failedTaskParameters/{taskId}` endpoint that fetches the original parameter values of a failed task. +- Added "Concatenate pairwise" transform operator. +- Make transform suggestion matching link spec / workflow customizable. +- API extensions: + - Transform evaluation endpoint: `/transform/tasks/{projectId}/{transformTaskId}/rule/{ruleId}/evaluated` + - Added endpoints for uploading and downloading files of datasets. + - REST endpoint to fetch dataset characteristics. + - `GET /api/workspace/projects/(projectId)/datasets/{datasetId}/characteristics` + - API endpoint to fetch IDs of file based datasets. + - `/api/core/datasets/resourceBased` + - REST endpoint to copy an arbitrary linking task to the matching linking task that will be used in the mapping suggestion. + - `POST /ontologyMatching/replaceOntologyMatchingLinkSpec` + - Both `matching.external.projectId` and `matching.external.linkSpecId` must be configured + - REST endpoint that generates an ontology matching project and linking tasks based on a specific transformation task. + - `POST /ontologyMatching/generateMatchingLinkRule` v23.2 of eccenca DataIntegration introduces the following changes: -- Close user menu automatically. 
-- Linking rule config: - - Add 'Inverse link type' parameter that defined a URI that is generated from the target to the source resource, i.e. the inverse of the 'link type' parameter. - - Add 'Is reflexive' parameter that when enabled does not link resources with themselves. -- In a workflow datasets with schema-less inputs, e.g. workflows, other datasets, are not considered to be outputs datasets anymore. -- Variable workflow API: - - Support uploading large input files via multipart/form-data request - - Support custom mime type "application/x-plugin-" in CONTENT-TYPE or ACCEPT in order to support all file based dataset plugins. - - Support query parameters 'config-dataSourceConfig-' and 'config-dataSinkConfig-' to configure dataset parameters of the data source and sink. -- Added read-only / uriProperty dataset attributes to endpoint responses. -- forward cmem-plugin-base to v4.1.0, via base image v2.2.0 -- The handling of errors in transform rules has been aligned with what is already shown in the evaluation: - - If a nested operator throws a validation error (e.g., if the input value is not a number for numeric operators), this no longer leads to a failure of the entire rule. - - The error will be added to the execution report. - - Failed operators will return no value. +- Close user menu automatically. +- Linking rule config: + - Add 'Inverse link type' parameter that defines a URI that is generated from the target to the source resource, i.e. the inverse of the 'link type' parameter. + - Add 'Is reflexive' parameter that when enabled does not link resources with themselves. +- In a workflow datasets with schema-less inputs, e.g. workflows, other datasets, are not considered to be outputs datasets anymore. +- Variable workflow API: + - Support uploading large input files via multipart/form-data request + - Support custom mime type "application/x-plugin-" in CONTENT-TYPE or ACCEPT in order to support all file based dataset plugins. 
+ - Support query parameters 'config-dataSourceConfig-' and 'config-dataSinkConfig-' to configure dataset parameters of the data source and sink. +- Added read-only / uriProperty dataset attributes to endpoint responses. +- forward cmem-plugin-base to v4.1.0, via base image v2.2.0 +- The handling of errors in transform rules has been aligned with what is already shown in the evaluation: + - If a nested operator throws a validation error (e.g., if the input value is not a number for numeric operators), this no longer leads to a failure of the entire rule. + - The error will be added to the execution report. + - Failed operators will return no value. v23.2 of eccenca DataIntegration ships the following fixes: -- Excel plugins are not available (CMEM-5088). -- Transform/linking tasks with operators that use projects resources, e.g. Excel transform, cannot be copied/cloned to other projects (CMEM-5065). -- Using a target property ending in `/valueOf` for a value mapping rule breaks the mapping editor (CMEM-5059). -- Re-configured pure input dataset in a workflow should not be seen as an output dataset (CMEM-5058). -- Prioritized and blocking activities should be run in a fork join pool as well (CMEM-4856). -- Workflow(s) info endpoints return error (500) when a workflow is invalid (CMEM-5099): - - There is a 'warnings' property in the returned JSON that describes which information is missing and why. -- Remove broken legacy navigation menu from client error template that is shown e.g. when a user is not authorized to use DI (CMEM-4977). -- Mapping editor: the path suggestion that exactly matches the search query is not shown in the list of suggestions (CMEM-5084). +- Excel plugins are not available (CMEM-5088). +- Transform/linking tasks with operators that use projects resources, e.g. Excel transform, cannot be copied/cloned to other projects (CMEM-5065). +- Using a target property ending in `/valueOf` for a value mapping rule breaks the mapping editor (CMEM-5059). 
+- Re-configured pure input dataset in a workflow should not be seen as an output dataset (CMEM-5058). +- Prioritized and blocking activities should be run in a fork join pool as well (CMEM-4856). +- Workflow(s) info endpoints return error (500) when a workflow is invalid (CMEM-5099): + - There is a 'warnings' property in the returned JSON that describes which information is missing and why. +- Remove broken legacy navigation menu from client error template that is shown e.g. when a user is not authorized to use DI (CMEM-4977). +- Mapping editor: the path suggestion that exactly matches the search query is not shown in the list of suggestions (CMEM-5084). ## eccenca DataIntegration Python Plugins v4.1.0 @@ -111,10 +111,10 @@ Corporate Memory v23.2 includes the DataIntegration Python Plugins support in ve v4.1.0 of eccenca DataIntegration Python Plugins adds the following new features: -- use `post_resource` api in `write_to_dataset` function to update dataset file resource -- use cmempy 23.2 -- upgrade dependencies -- enforce usage of Python 3.11 +- use `post_resource` api in `write_to_dataset` function to update dataset file resource +- use cmempy 23.2 +- upgrade dependencies +- enforce usage of Python 3.11 ## eccenca DataManager v23.2 @@ -122,46 +122,46 @@ We are excited to announce the latest update to DataManager v23.2, which introdu v23.2 of eccenca DataManager adds the following new features: -- Added advanced options ("Inverse Linking Property" and "Irreflexive Linking") in a Link rule setup Dialog. -- Automatic layouts in Easynav. -- Configurable defaultTimeout for queries. +- Added advanced options ("Inverse Linking Property" and "Irreflexive Linking") in a Link rule setup Dialog. +- Automatic layouts in Easynav. +- Configurable defaultTimeout for queries. v23.2 of eccenca DataManager ships the following changes: -- Auto-close user menu. -- Delete Resource Dialog was overhauled, and can now trigger a `shui:onDeleteUpdate` query. 
-- Adjusted the payload when saving a custom or the default workspace config so that it has only the modified values. -- Better error message for empty string after stopwords remove in the Easynav module. -- Allow stale facets initially to improve loading timing. After stale facets are fetched, actual ones will be requested in background. -- Easynav: - - Long labels are shown in two lines. - - Save option for saving only the ontology changes. - - Context menu on resource nodes is re-enables. - - Query based entry into Easynav via the "graph" tab in explore. - - Context graph name is shown in breadcrumbs. - - Detailed filtering in the visualization catalog. - - Search bar centers on the node. - - All possible connections are shown for a node, not just the ones with data. - - Rich graph selection widget. -- Pathbuilder: - - Added subpaths option if `hierarchyEnable` is true. - - Added server search for subject and predicate. - - Added support for longer labels in resource selector. - - Added helper texts for subject selection and subpaths selection. - - Added depictions for subjects and predicates. - - Added different colors for subpaths. -- Explore/Properties View: - - Geo-coordinates are only shown, when a map server is configured. - - The wording of the error message when no selected graph found is changed. +- Auto-close user menu. +- Delete Resource Dialog was overhauled, and can now trigger a `shui:onDeleteUpdate` query. +- Adjusted the payload when saving a custom or the default workspace config so that it has only the modified values. +- Better error message for empty string after stopwords remove in the Easynav module. +- Allow stale facets initially to improve loading timing. After stale facets are fetched, actual ones will be requested in background. +- Easynav: + - Long labels are shown in two lines. + - Save option for saving only the ontology changes. + - Context menu on resource nodes is re-enabled. 
+ - Query based entry into Easynav via the "graph" tab in explore. + - Context graph name is shown in breadcrumbs. + - Detailed filtering in the visualization catalog. + - Search bar centers on the node. + - All possible connections are shown for a node, not just the ones with data. + - Rich graph selection widget. +- Pathbuilder: + - Added subpaths option if `hierarchyEnable` is true. + - Added server search for subject and predicate. + - Added support for longer labels in resource selector. + - Added helper texts for subject selection and subpaths selection. + - Added depictions for subjects and predicates. + - Added different colors for subpaths. +- Explore/Properties View: + - Geo-coordinates are only shown, when a map server is configured. + - The wording of the error message when no selected graph found is changed. v23.2 of eccenca DataIntegration ships the following fixes: -- Replaced an obsolete `LabelResolutionApi` by RTK-Query to catch 401 errors (lost session/authorization) and bring cache (CMEM-4979). -- Restored notifications toast (CMEM-4979). -- Enabled deletion of large tables (CMEM-4898). -- Added `mapServer` options in explore config (CMEM-4926). -- Error message shows when trying to upload broken file to the graph (CMEM-4704). -- Fixed error with lost graph list tabs on workspace switch (CMEM-5087). +- Replaced an obsolete `LabelResolutionApi` by RTK-Query to catch 401 errors (lost session/authorization) and bring cache (CMEM-4979). +- Restored notifications toast (CMEM-4979). +- Enabled deletion of large tables (CMEM-4898). +- Added `mapServer` options in explore config (CMEM-4926). +- Error message shows when trying to upload broken file to the graph (CMEM-4704). +- Fixed error with lost graph list tabs on workspace switch (CMEM-5087). 
## eccenca DataPlatform v23.2.1 @@ -169,61 +169,61 @@ We're excited to bring you the latest update to DataPlatform v23.2, featuring nu v23.2.1 of eccenca DataPlatform ships following fixes: -- Bootstrap Data: Removed obsolete DataIntegration vocabulary from shape catalog +- Bootstrap Data: Removed obsolete DataIntegration vocabulary from shape catalog v23.2 of eccenca DataPlatform adds the following new features: -- Bootstrap Data: allow `sh:order` for `sh:NodesShape`. -- Dynamic access conditions backend functionality. -- Add optional facets request param for getting (possibly) stale cached values. -- Provenance metadata for Easynav visualizations. -- Added property `hierarchyEnable` to Link Rule Modul. -- Added properties for `mapServer` to Explore Modul. -- Add node shape property shape for `onDeleteUpdate` of resource. -- Added DI vocabulary. -- Added provenance metadata to visualization catalogue entries. +- Bootstrap Data: allow `sh:order` for `sh:NodesShape`. +- Dynamic access conditions backend functionality. +- Add optional facets request param for getting (possibly) stale cached values. +- Provenance metadata for Easynav visualizations. +- Added property `hierarchyEnable` to Link Rule Modul. +- Added properties for `mapServer` to Explore Modul. +- Add node shape property shape for `onDeleteUpdate` of resource. +- Added DI vocabulary. +- Added provenance metadata to visualization catalogue entries. v23.2 of eccenca DataPlatform ships the following changes: -- PropertyUsage endpoint delivers language tags. -- Make order of node shapes editable. -- Resolving of depictions for resource along node shape order. -- Viewing labels of NodeShapes as `SHACL.Name` - changed from `RDFS.Label`. -- Resource deletion applies Symmetric Concise Bound Description including incoming links. -- Show full list of access conditions for users with access condition management action. -- Add configuration option to switch between ontodia / graph tab in explore view. 
+- PropertyUsage endpoint delivers language tags. +- Make order of node shapes editable. +- Resolving of depictions for resource along node shape order. +- Viewing labels of NodeShapes as `SHACL.Name` - changed from `RDFS.Label`. +- Resource deletion applies Symmetric Concise Bound Description including incoming links. +- Show full list of access conditions for users with access condition management action. +- Add configuration option to switch between ontodia / graph tab in explore view. v23.2 of eccenca DataPlatform ships following fixes: -- Prevent upload of incorrect file URIs (CMEM-4360). -- Reintroduce `defaultTimeout` for UI queries (CMEM-5100). -- Make property `defaultGraph` able to be overwritten in custom workspace (CMEM-4902). -- Fix labels of node shapes from `RDFS.Label` to `SHACL.Name` (CMEM-4743). +- Prevent upload of incorrect file URIs (CMEM-4360). +- Reintroduce `defaultTimeout` for UI queries (CMEM-5100). +- Make property `defaultGraph` able to be overwritten in custom workspace (CMEM-4902). +- Fix labels of node shapes from `RDFS.Label` to `SHACL.Name` (CMEM-4743). 
## eccenca Corporate Memory Control (cmemc) v23.2 v23.2 of eccenca Corporate Memory Control adds the following new features: -- `admin user password` command: - - option `--request-change` added, to send a email to user to reset the password -- `dataset create` command: - - add `readOnly` and `uriProperty` keys for the `-p/--parameter` option -- `admin client` command group: - - `list` command - list client accounts - - `open` command - Open clients in the browser - - `secret` command - Get or generate a new secret for a client account -- `project create` command: - - new option `--from-transformation` to create a mapping suggestion project +- `admin user password` command: + - option `--request-change` added, to send a email to user to reset the password +- `dataset create` command: + - add `readOnly` and `uriProperty` keys for the `-p/--parameter` option +- `admin client` command group: + - `list` command - list client accounts + - `open` command - Open clients in the browser + - `secret` command - Get or generate a new secret for a client account +- `project create` command: + - new option `--from-transformation` to create a mapping suggestion project v23.2 of eccenca Corporate Memory Control introduces the following changes: -- `dataset upload` command: - - use new endpoint which is aware of read-only datasets -- `workflow io` command: - - use of extended io endpoint - - allows for uploading bigger files - - allows for more input and output mimetypes - - change default output to JSON +- `dataset upload` command: + - use new endpoint which is aware of read-only datasets +- `workflow io` command: + - use of extended io endpoint + - allows for uploading bigger files + - allows for more input and output mimetypes + - change default output to JSON ## Migration Notes diff --git a/docs/release-notes/corporate-memory-23-3/index.md b/docs/release-notes/corporate-memory-23-3/index.md index 05719914e..edc852c75 100644 --- 
a/docs/release-notes/corporate-memory-23-3/index.md +++ b/docs/release-notes/corporate-memory-23-3/index.md @@ -12,21 +12,21 @@ Corporate Memory 23.3.2 is the second patch release in the 23.3 release line. The highlights of this release are: -- Explore and Author: - - new **[charts catalog](../../explore-and-author/charts-catalog/index.md)** module added, which allows for defining BI widgets / charts which can be integrated into shapes - - preview release of our generative AI / LLM based **Ontology and Query Assistant** -- Build: - - operate BUILD like never before by using the new **keyboard shortcuts** (press "?" in the build module to learn the details) - - several **improvements to the workflows view**: create new datasets and other workflow-operators in place, dependencies and execution order is now explicitly modeled, show schema or ports -- Automate: - - new **`project variable` command group** plus several addition to existing commands +- Explore and Author: + - new **[charts catalog](../../explore-and-author/charts-catalog/index.md)** module added, which allows for defining BI widgets / charts which can be integrated into shapes + - preview release of our generative AI / LLM based **Ontology and Query Assistant** +- Build: + - operate BUILD like never before by using the new **keyboard shortcuts** (press "?" 
in the build module to learn the details) + - several **improvements to the workflows view**: create new datasets and other workflow-operators in place, dependencies and execution order is now explicitly modeled, show schema or ports +- Automate: + - new **`project variable` command group** plus several addition to existing commands This release delivers the following component versions: -- eccenca DataIntegration v23.3.2 -- eccenca DataManager v23.3.1 -- eccenca DataPlatform v23.3.1 -- eccenca Corporate Memory Control (cmemc) v23.3.0 +- eccenca DataIntegration v23.3.2 +- eccenca DataManager v23.3.1 +- eccenca DataPlatform v23.3.1 +- eccenca Corporate Memory Control (cmemc) v23.3.0 More detailed release notes for these versions are listed below. @@ -36,68 +36,68 @@ We're excited to bring you the latest update to DataIntegration v23.3, featuring v23.3.2 of DataIntegration ships the following fixes: -- Entities with values larger than 65k cannot be serialized. -- JSON property path evaluation fails for missing key. +- Entities with values larger than 65k cannot be serialized. +- JSON property path evaluation fails for missing key. v23.3.1 of DataIntegration ships the following improvements: -- Workflow operator validates XML datasets against a provided XML Schema. -- Support entering a custom ID when cloning a project or project task. -- "Evaluate Template" operator has a new option for evaluating the template on the entire input set at once. +- Workflow operator validates XML datasets against a provided XML Schema. +- Support entering a custom ID when cloning a project or project task. +- "Evaluate Template" operator has a new option for evaluating the template on the entire input set at once. v23.3.1 of DataIntegration ships the following fixes: -- Long task parameter values cannot be fully seen in task config preview. -- Project export does not fail if project files cannot be read. -- Unexpected inputs of a node are not executed anymore. 
-- Transform report does not count entities in child mappings. -- Rule operator parameter auto-complete default values do not have a label when creating a new operator. +- Long task parameter values cannot be fully seen in task config preview. +- Project export does not fail if project files cannot be read. +- Unexpected inputs of a node are not executed anymore. +- Transform report does not count entities in child mappings. +- Rule operator parameter auto-complete default values do not have a label when creating a new operator. v23.3.0 of DataIntegration adds the following new features: -- plugin base library updated to v4.3.0 ([changelog](https://github.com/eccenca/cmem-plugin-base/blob/main/CHANGELOG.md)) -- Support for custom plugin icon. -- New `distinct by` Workflow operator that removes duplicated entities based on a user-defined path. -- Endpoint to download the DataIntegration vocabulary. -- Mappings allow custom target types - - If the new custom data type is selected, a new input field _type_ allows the user to enter a type URI. -- Added variables widget to Workflow view. -- Added hotkey access to actions in DI workspace. -- Workflow editor: - - Validate ports and connections and show warnings for found issues. - - Have menu option to show input/output schema for ports that either expect or output a fixed schema. - - Support dependency connections between workflow nodes to specify non-data execution dependencies. +- plugin base library updated to v4.3.0 ([changelog](https://github.com/eccenca/cmem-plugin-base/blob/main/CHANGELOG.md)) +- Support for custom plugin icon. +- New `distinct by` Workflow operator that removes duplicated entities based on a user-defined path. +- Endpoint to download the DataIntegration vocabulary. +- Mappings allow custom target types + - If the new custom data type is selected, a new input field _type_ allows the user to enter a type URI. +- Added variables widget to Workflow view. 
+- Added hotkey access to actions in DI workspace. +- Workflow editor: + - Validate ports and connections and show warnings for found issues. + - Have menu option to show input/output schema for ports that either expect or output a fixed schema. + - Support dependency connections between workflow nodes to specify non-data execution dependencies. v23.3.0 of DataIntegration introduces the following changes: -- The threshold field for distance measures has been improved: - - For boolean distance measures, the threshold is not shown as it has no effect. - - For normalized and unbound measures, the range of allowed values as well as an improved tooltip has been added. - - A error message is shown if the entered threshold is not valid for the given distance measure. -- Parse JSON operator now works with multiple entities. -- Updating or deleting project variables will update all affected tasks transactionally: - - The user is not allowed to delete a variable that is used in a task parameter template. - - The user is not allowed to change a variable to a value that violates restrictions in a task that uses it - - If the task parameter has a specific type (such as integer) and the template now evaluates to an incompatible type. - - If the task imposes other restrictions on a parameter (for instance, does not allow values below 0). -- base image switch to bookworm (python:3.11-slim-bookworm) -- If a path uses a property filter, an error will be thrown on writing data to a Knowledge Graph, if the property is not a valid URI. -- Support expanding all rule trees after expanding all rows in the transform evaluation. -- Change input ports definition of `Upload File to Knowledge Graph` operator from an empty fixed schema to variable inputs to make it better compatible. -- Transform page: When switching between tabs, e.g. from mapping editor to evaluation tab, the currently active rule stays active. -- JSON `#text` path now returns the formatted JSON as documented. 
+- The threshold field for distance measures has been improved: + - For boolean distance measures, the threshold is not shown as it has no effect. + - For normalized and unbound measures, the range of allowed values as well as an improved tooltip has been added. + - A error message is shown if the entered threshold is not valid for the given distance measure. +- Parse JSON operator now works with multiple entities. +- Updating or deleting project variables will update all affected tasks transactionally: + - The user is not allowed to delete a variable that is used in a task parameter template. + - The user is not allowed to change a variable to a value that violates restrictions in a task that uses it + - If the task parameter has a specific type (such as integer) and the template now evaluates to an incompatible type. + - If the task imposes other restrictions on a parameter (for instance, does not allow values below 0). +- base image switch to bookworm (python:3.11-slim-bookworm) +- If a path uses a property filter, an error will be thrown on writing data to a Knowledge Graph, if the property is not a valid URI. +- Support expanding all rule trees after expanding all rows in the transform evaluation. +- Change input ports definition of `Upload File to Knowledge Graph` operator from an empty fixed schema to variable inputs to make it better compatible. +- Transform page: When switching between tabs, e.g. from mapping editor to evaluation tab, the currently active rule stays active. +- JSON `#text` path now returns the formatted JSON as documented. v23.2 of DataIntegration ships the following fixes: -- Project variables widget showing the variables of the wrong project. -- Python package uninstall is not able to remove crucial packages anymore. -- Reload failed tasks after project/workspace reload. -- Reloaded failed tasks are missing the original label and description. -- Fix property pair alignment. 
-- Hotkey and quick search modal are not always shown on top of other modals. -- Testing for invalid documents downloaded from GDocs fixed and adapted to new behavior of wrong requests. -- Error message in 'SPARQL endpoint' plugin to mention prohibited URL redirect to a different protocol. -- `JDBC endpoint` dataset: Setting the user via JDBC URL while leaving the user parameter blank does not work. +- Project variables widget showing the variables of the wrong project. +- Python package uninstall is not able to remove crucial packages anymore. +- Reload failed tasks after project/workspace reload. +- Reloaded failed tasks are missing the original label and description. +- Fix property pair alignment. +- Hotkey and quick search modal are not always shown on top of other modals. +- Testing for invalid documents downloaded from GDocs fixed and adapted to new behavior of wrong requests. +- Error message in 'SPARQL endpoint' plugin to mention prohibited URL redirect to a different protocol. +- `JDBC endpoint` dataset: Setting the user via JDBC URL while leaving the user parameter blank does not work. ## eccenca DataManager v23.3.1 @@ -105,28 +105,28 @@ We are excited to announce the latest update to DataManager v23.3, which introdu v23.3.1 of DataManager ships the following fixes: -- _Workspace configuration:_ explore `defaultGraph` is now correctly evaluated. +- _Workspace configuration:_ explore `defaultGraph` is now correctly evaluated. v23.3.0 of DataManager adds the following new features: -- Implemented Charts module with Shacl integration. -- Added option to show edges without the shapes on the EasyNav canvas and in the sidebar, i.e. the node expansion is still shaped. -- Query module allows simple query creation with an form assisted dialogue. +- Implemented Charts module with Shacl integration. +- Added option to show edges without the shapes on the EasyNav canvas and in the sidebar, i.e. the node expansion is still shaped. 
+- Query module allows simple query creation with an form assisted dialogue. v23.3.0 of DataManager ships the following changes: -- Internal: - - Query module is migrated from Redux to a Context storage. - - Query module is extracted to a separate common component. -- ResourceSelect doesn't request options anymore if they have already been requested earlier. -- `shui:listQuery` allows usage of the `{{username}}` placeholder, which is replaced by the name (i.e.not the IRI) of the logged in user. +- Internal: + - Query module is migrated from Redux to a Context storage. + - Query module is extracted to a separate common component. +- ResourceSelect doesn't request options anymore if they have already been requested earlier. +- `shui:listQuery` allows usage of the `{{username}}` placeholder, which is replaced by the name (i.e.not the IRI) of the logged in user. v23.3.0 of DataManager ships the following fixes: -- Fixed broken navigation (workspace part of URL was lost). -- CMEM Manual Testing 23.2 e2e - Don't do redundant redirects in the Module context. -- Navigation tree in the Thesaurus module was collapsed after a subitem select. -- Use more space for visualization catalogue if available. +- Fixed broken navigation (workspace part of URL was lost). +- CMEM Manual Testing 23.2 e2e - Don't do redundant redirects in the Module context. +- Navigation tree in the Thesaurus module was collapsed after a subitem select. +- Use more space for visualization catalogue if available. 
## eccenca DataPlatform v23.3.1 @@ -134,37 +134,37 @@ We're excited to bring you the latest update to DataPlatform v23.3, featuring nu v23.3.1 of DataPlatform ships following fixes: -- Backport of fix for URI Template ordering, when storing shaped resource + sub-resource with uriTemplate for each +- Backport of fix for URI Template ordering, when storing shaped resource + sub-resource with uriTemplate for each v23.3.0 of DataPlatform ships following fixes: -- Fixed non-working query cancelling in GraphDb 10.3 -- Wrong caching on facet query calls +- Fixed non-working query cancelling in GraphDb 10.3 +- Wrong caching on facet query calls v23.3.0 of DataPlatform adds the following new features: -- Added endpoint for removal of system resources i.e. bootstrap data -- GraphDb embedded development build +- Added endpoint for removal of system resources i.e. bootstrap data +- GraphDb embedded development build v23.3.0 of DataPlatform ships the following changes: -- Dataplatform health check update: - - activation of spring boot kubernetes health groups `liveness/readiness`. - - creation of health group `sparql` which can be (de)activated: - - `management.health.sparql.enabled`: (De)activates check from DP to store backend (default: true). - - `management.health.sparql.fixedDelayInMilliseconds`: delay in ms between store checks (default: 5000). - - `management.health.sparql.timeoutInMilliseconds`: timeout on how long to wait for store to answer check request (default: 5000). - - health group `sparql` contributes to readiness state / overall health endpoint. - - GraphDB health check uses Gdb endpoint for repository. -- Charts configuration API and Shacl integration. -- Breaking change: remove property `authorization.abox.prefix` (fixed default: ). -- Workspace configuration adjustments: - - `Application presentation` added properties companyName, applicationName, bannerBackgroundColor. - - `EasyNav module` added property shapePropertyView. - - Chart configuration module. 
- - Change default system workspace values. -- Spring Boot v3.1.x -- Support for the `{label}` parameter in uri templates. +- Dataplatform health check update: + - activation of spring boot kubernetes health groups `liveness/readiness`. + - creation of health group `sparql` which can be (de)activated: + - `management.health.sparql.enabled`: (De)activates check from DP to store backend (default: true). + - `management.health.sparql.fixedDelayInMilliseconds`: delay in ms between store checks (default: 5000). + - `management.health.sparql.timeoutInMilliseconds`: timeout on how long to wait for store to answer check request (default: 5000). + - health group `sparql` contributes to readiness state / overall health endpoint. + - GraphDB health check uses Gdb endpoint for repository. +- Charts configuration API and Shacl integration. +- Breaking change: remove property `authorization.abox.prefix` (fixed default: ). +- Workspace configuration adjustments: + - `Application presentation` added properties companyName, applicationName, bannerBackgroundColor. + - `EasyNav module` added property shapePropertyView. + - Chart configuration module. + - Change default system workspace values. +- Spring Boot v3.1.x +- Support for the `{label}` parameter in uri templates. 
## eccenca Corporate Memory Control (cmemc) v23.3.0 @@ -172,31 +172,31 @@ We're excited to bring you the latest update to Corporate Memory Control (cmemc) v23.3.0 of Corporate Memory Control adds the following new features: -- `project variable` command group - - `create` command - create a new project variable - - `delete` command - delete a project variable - - `get` command - get the value or other data of a project variable - - `list` command - list available project variables - - `update` command - update data of an existing project variable -- `admin workspace python` command group - - `open` command - open a package pypi.org page in the browser - - `list --available` option - list published packages - - `uninstall --all` option - reset the whole python environment -- `project` command group - - `create --label` option - give a label for the created project - - `create --description` option - give a description for the created project -- `dataset` command group - - `update` command - update the configuration of an existing dataset -- `workflow` command group - - `execute --progress` option - show a progress bar -- `admin store` command group - - `bootstrap --remove` option - delete the bootstrap data +- `project variable` command group + - `create` command - create a new project variable + - `delete` command - delete a project variable + - `get` command - get the value or other data of a project variable + - `list` command - list available project variables + - `update` command - update data of an existing project variable +- `admin workspace python` command group + - `open` command - open a package pypi.org page in the browser + - `list --available` option - list published packages + - `uninstall --all` option - reset the whole python environment +- `project` command group + - `create --label` option - give a label for the created project + - `create --description` option - give a description for the created project +- `dataset` command group + - `update` 
command - update the configuration of an existing dataset +- `workflow` command group + - `execute --progress` option - show a progress bar +- `admin store` command group + - `bootstrap --remove` option - delete the bootstrap data v23.3.0 of Corporate Memory Control introduces the following changes: -- `workflow execute` command - more debug info when polling for workflow info -- Upgrade to `click` v8 (see Migration Notes). -- Upgrade to debian 12 based image: `3.11.6-slim-bookworm` +- `workflow execute` command - more debug info when polling for workflow info +- Upgrade to `click` v8 (see Migration Notes). +- Upgrade to debian 12 based image: `3.11.6-slim-bookworm` ## Migration Notes @@ -226,9 +226,9 @@ Due to the removal of the `authorization.abox.prefix` configuration option, a ch From v23.3 `AccessCondition`s are only regarded if their IRIs use the prefix `http://eccenca.com/` (e.g. have an IRI like `http://eccenca.com/170f25c2-3b92-40d7-b247-5bba42dbe22a`). Required action: -- If you have been using a different prefix for your `AccessCondition`s, change the prefix of these resources. E.g. by: - - search / replace the old prefix with the new one in your RDF graph backup - - using a `SPARQL query` like: +- If you have been using a different prefix for your `AccessCondition`s, change the prefix of these resources. E.g. 
by: + - search / replace the old prefix with the new one in your RDF graph backup + - using a `SPARQL query` like: ```sql PREFIX eccauth: @@ -262,6 +262,6 @@ From v23.3 `AccessCondition`s are only regarded if their IRIs use the prefix `ht ### cmemc -- The upgrade to `click` v8 involves new completion functions (see [completion manual](../../automate/cmemc-command-line-interface/configuration/completion-setup/index.md)) - - Old: `_CMEMC_COMPLETE=source_zsh cmemc` - - New: `_CMEMC_COMPLETE=zsh_source cmemc` +- The upgrade to `click` v8 involves new completion functions (see [completion manual](../../automate/cmemc-command-line-interface/configuration/completion-setup/index.md)) + - Old: `_CMEMC_COMPLETE=source_zsh cmemc` + - New: `_CMEMC_COMPLETE=zsh_source cmemc` diff --git a/docs/release-notes/corporate-memory-24-1/index.md b/docs/release-notes/corporate-memory-24-1/index.md index f97f7b784..99a37f5fa 100644 --- a/docs/release-notes/corporate-memory-24-1/index.md +++ b/docs/release-notes/corporate-memory-24-1/index.md @@ -12,26 +12,26 @@ Corporate Memory 24.1.3 is the third patch release in the 24.1 release line. 
The highlights of this release are: -- Build: - - New improved **REST operator** (v2) with lots of additional features - - Extendend **Keyboard Shortcuts** in workflow editor -- Automate: - - New **`admin acl` command group** to automate management of access conditions - - New **`graph validation` command group** to automate batch validation of graph resources against SHACL shapes -- Explore and Author: - - Preview of our new **SHACL Authoring Engine** (enable with feature flag `shacl2` on your workspace configuration: `Basics`>`Workspace`>`featureFlags`) +- Build: + - New improved **REST operator** (v2) with lots of additional features + - Extendend **Keyboard Shortcuts** in workflow editor +- Automate: + - New **`admin acl` command group** to automate management of access conditions + - New **`graph validation` command group** to automate batch validation of graph resources against SHACL shapes +- Explore and Author: + - Preview of our new **SHACL Authoring Engine** (enable with feature flag `shacl2` on your workspace configuration: `Basics`>`Workspace`>`featureFlags`) This release delivers the following component versions: -- eccenca DataIntegration v24.1.1 -- eccenca DataManager v24.1.3 -- eccenca DataPlatform v24.1.2 -- eccenca Corporate Memory Control (cmemc) v24.1.4 +- eccenca DataIntegration v24.1.1 +- eccenca DataManager v24.1.3 +- eccenca DataPlatform v24.1.2 +- eccenca Corporate Memory Control (cmemc) v24.1.4 We tested this release with the following dependency components: -- Ontotext GraphDB v10.6.2 -- Keycloak v24.0.3 +- Ontotext GraphDB v10.6.2 +- Keycloak v24.0.3 More detailed information for this release is provided in the next sections. @@ -41,96 +41,96 @@ We're excited to bring you the latest update to DataIntegration v24.1, which int v24.1.2 of DataIntegration introduces the following changes: -- Description fields provide a Markdown-editor now. -- Removed Dependency ports menu item from transform and linking nodes. 
+- Description fields provide a Markdown-editor now. +- Removed Dependency ports menu item from transform and linking nodes. v24.1.2 of DataIntegration ships the following fixes: -- Removed bloated Amazon AWS bundle from dependencies. -- Include Snowflake JDBC driver. -- docker image: bump zlibg to mitigate CVE-2023-45853. -- docker image: remove libaom to mitigate CVE-2023-6879. -- Cannot create XML with defined DTD in output template. -- Fixed upload process in DI project files widget. +- Removed bloated Amazon AWS bundle from dependencies. +- Include Snowflake JDBC driver. +- docker image: bump zlibg to mitigate CVE-2023-45853. +- docker image: remove libaom to mitigate CVE-2023-6879. +- Cannot create XML with defined DTD in output template. +- Fixed upload process in DI project files widget. v24.1.1 of DataIntegration adds the following new features: -- A new facet has been added to the workspace search that allows to filter for read-only dataset -- The "Evaluate template" operator now supports hierarchical input entities if full evaluation is set -- Better preview of hierarchical formats, such as XML and JSON +- A new facet has been added to the workspace search that allows to filter for read-only dataset +- The "Evaluate template" operator now supports hierarchical input entities if full evaluation is set +- Better preview of hierarchical formats, such as XML and JSON v24.1.1 of DataIntegration introduces the following changes: -- Icon of notification menu was aligned to DM, it's now a bell. +- Icon of notification menu was aligned to DM, it's now a bell. v24.1.1 of DataIntegration ships the following fixes: -- Fixed various vulnerabilities -- AWS S3 workspace: IO Error Attempted read on closed stream -- Secret values (passwords) in DI task configurations not shown to users once entered -- The create project endpoint returns a custom error format instead of HTTP problem details -- Notification menu was fixed regarding its opening and closing behavior. 
-- XML Dataset produces wrong tags if the target property is a full URI -- Macro support for Jinja templates +- Fixed various vulnerabilities +- AWS S3 workspace: IO Error Attempted read on closed stream +- Secret values (passwords) in DI task configurations not shown to users once entered +- The create project endpoint returns a custom error format instead of HTTP problem details +- Notification menu was fixed regarding its opening and closing behavior. +- XML Dataset produces wrong tags if the target property is a full URI +- Macro support for Jinja templates v24.1.0 of DataIntegration adds the following new features: -- Multiline editing of template values -- Added loose connection of workflow nodes similar to linking editor -- XML, JSON, Excel and CSV datasets support retrieving the line and column numbers -- Error report for (validation) errors in transform and linking rule editors and transform execution report - - Shows additional details like a stacktrace and input values -- Added hotkey integration for creating new items in the workflow editor -- Improved REST operator (v2) - - With support for multiple REST requests, one per input entity - - Paging support: If the API does not return all results in a single request, this features allows to page via multiple requests and merge the results of all requests - - Better error handling and retry mechanism: Retries requests and collects errors for execution report - - Rate limiting of requests by setting a delay between subsequent requests - - Limit and offset: Only executes a specific "window" of the input entities/requests - - URL property: Allows to define a property that is injected into the result JSON that contains the original request URL - - Support dataset file output, i.e. a file based dataset can be connected to the operator output, which overwrites the dataset file with the results from the REST requests - - This allows to handle REST results as any dataset content - - Supports zip files. 
If a dataset (currently JSON, XML, RDF file, CSV) specifies a zip file (ending in .zip) a zip archive is written that contains one file per request result -- JSON dataset - - Support bulk resources, i.e. JSON files in a zip file - - Support reading JSON Lines files -- Python workflow plugins can now consume and produce hierarchical entities -- Additions to the workflow configuration ports: - - Allow to reconfigure transform and linking tasks in workflows - - Datasets can be connected directly to the configuration port -- Extended auto-completion support when opening the mapping (rule) editor in a workflow context: - - Support auto-completion of target properties for fixed target schema and config port schema (transformation connected to config port) - - Support auto-completion of values paths for fixed input schema -- Added timer for workflow execution and in activity view -- Error notification - - Add badge to error notification menu icon with error count. +- Multiline editing of template values +- Added loose connection of workflow nodes similar to linking editor +- XML, JSON, Excel and CSV datasets support retrieving the line and column numbers +- Error report for (validation) errors in transform and linking rule editors and transform execution report + - Shows additional details like a stacktrace and input values +- Added hotkey integration for creating new items in the workflow editor +- Improved REST operator (v2) + - With support for multiple REST requests, one per input entity + - Paging support: If the API does not return all results in a single request, this features allows to page via multiple requests and merge the results of all requests + - Better error handling and retry mechanism: Retries requests and collects errors for execution report + - Rate limiting of requests by setting a delay between subsequent requests + - Limit and offset: Only executes a specific "window" of the input entities/requests + - URL property: Allows to define a property that 
is injected into the result JSON that contains the original request URL + - Support dataset file output, i.e. a file based dataset can be connected to the operator output, which overwrites the dataset file with the results from the REST requests + - This allows to handle REST results as any dataset content + - Supports zip files. If a dataset (currently JSON, XML, RDF file, CSV) specifies a zip file (ending in .zip) a zip archive is written that contains one file per request result +- JSON dataset + - Support bulk resources, i.e. JSON files in a zip file + - Support reading JSON Lines files +- Python workflow plugins can now consume and produce hierarchical entities +- Additions to the workflow configuration ports: + - Allow to reconfigure transform and linking tasks in workflows + - Datasets can be connected directly to the configuration port +- Extended auto-completion support when opening the mapping (rule) editor in a workflow context: + - Support auto-completion of target properties for fixed target schema and config port schema (transformation connected to config port) + - Support auto-completion of values paths for fixed input schema +- Added timer for workflow execution and in activity view +- Error notification + - Add badge to error notification menu icon with error count. 
v24.1.0 of DataIntegration introduces the following changes: -- Show project variables re-ordering errors (with details) directly in project variables widget -- Support PATCH and DELETE requests in REST operators -- Upgraded libraries, in particular Play to v2.9.1 and Spark to v3.5.0 -- Support of custom tasks as input for transform and linking tasks -- Create/update dialogue: - - When a parameter value is changed that other parameters are depending on, those parameter values are reset because they might not be valid anymore -- Shortened workflow execution failure message shown in activity widget -- Added `Fail workflow` flag to `Cancel workflow` operator +- Show project variables re-ordering errors (with details) directly in project variables widget +- Support PATCH and DELETE requests in REST operators +- Upgraded libraries, in particular Play to v2.9.1 and Spark to v3.5.0 +- Support of custom tasks as input for transform and linking tasks +- Create/update dialogue: + - When a parameter value is changed that other parameters are depending on, those parameter values are reset because they might not be valid anymore +- Shortened workflow execution failure message shown in activity widget +- Added `Fail workflow` flag to `Cancel workflow` operator v24.1.0 of DataIntegration ships the following fixes: -- Many errors occurring in a form/modal, e.g. 
from requests, are hidden because they are shown in the global error notification which cannot be accessed while the form is open -- Missing or problematic error handling in several forms and other places -- Transform editor should show plugin labels instead of ids -- Transform execution report validation icons in mapping tree do not update after running the execution -- When upgrading a plugin, new parameters are not shown in transform editor -- Workflow editor: Creating a new connected task that has no input port connects to the output port -- Copying a project with custom prefixes into a project that misses these prefixes fails -- Workflow report always states `...has not finished execution yet.` -- Cannot add a new project variable after having tried to add it with an empty value -- Support for ARM64 architecture -- View completely crashes when error is not caught in any tab view (plugin) - there should be an error boundary -- Mapping editor shows spinner when no network is available when switching to it -- Linking editor does not load when network unavailable instead of showing error +- Many errors occurring in a form/modal, e.g. 
from requests, are hidden because they are shown in the global error notification which cannot be accessed while the form is open +- Missing or problematic error handling in several forms and other places +- Transform editor should show plugin labels instead of ids +- Transform execution report validation icons in mapping tree do not update after running the execution +- When upgrading a plugin, new parameters are not shown in transform editor +- Workflow editor: Creating a new connected task that has no input port connects to the output port +- Copying a project with custom prefixes into a project that misses these prefixes fails +- Workflow report always states `...has not finished execution yet.` +- Cannot add a new project variable after having tried to add it with an empty value +- Support for ARM64 architecture +- View completely crashes when error is not caught in any tab view (plugin) - there should be an error boundary +- Mapping editor shows spinner when no network is available when switching to it +- Linking editor does not load when network unavailable instead of showing error ## eccenca DataManager v24.1.3 @@ -138,49 +138,49 @@ We are excited to announce the latest update to DataManager v24.1, which introdu v24.1.3 of DataManager ships the following fixes: -- docker image: bump zlib package to mitigate CVE-2023-45853 +- docker image: bump zlib package to mitigate CVE-2023-45853 v24.1.2 of DataManager ships the following fixes: -- Version string is no longer suffixed by the dirty flag due to re-generated clients +- Version string is no longer suffixed by the dirty flag due to re-generated clients v24.1.1 of DataManager ships the following fixes: -- Inline View is used when opening a Knowledge Graph Dataset in DataIntegration -- Delete Thesaurus dialog is now working as expected -- Order of graph lists is respected, when determining the first graph to explore -- Fixed several issues with the unshaped-properties view mode of easynav, new visualizations 
and creating new, inverted, edges -- Hide license info for store, if no expiration date is available +- Inline View is used when opening a Knowledge Graph Dataset in DataIntegration +- Delete Thesaurus dialog is now working as expected +- Order of graph lists is respected, when determining the first graph to explore +- Fixed several issues with the unshaped-properties view mode of easynav, new visualizations and creating new, inverted, edges +- Hide license info for store, if no expiration date is available v24.1.0 of DataManager adds the following new features: -- License warnings for Corporate Memory and GraphDB license -- Added validation for invalid URI format in vocabulary registration form -- SHACL2 (beta feature, disable per default) - - support for literals - - support for object properties - - validation - - context graph -- SVG support for the object view -- A link to the DataPlatform API documentation +- License warnings for Corporate Memory and GraphDB license +- Added validation for invalid URI format in vocabulary registration form +- SHACL2 (beta feature, disable per default) + - support for literals + - support for object properties + - validation + - context graph +- SVG support for the object view +- A link to the DataPlatform API documentation v24.1.0 of DataManager ships the following changes: -- Explore Navigation Component, now supports depictions and pre-loading of the concepts list -- I18N - - Increased coverage - - Enabled nesting of the keys in translations - - Improvements in the application header in explore +- Explore Navigation Component, now supports depictions and pre-loading of the concepts list +- I18N + - Increased coverage + - Enabled nesting of the keys in translations + - Improvements in the application header in explore v24.1.0 of DataManager ships the following fixes: -- Security Update of Java wrapper -- Workspace selection resets module selection -- Considering `exploreModuleConfiguration.defaultGraph` during the Explore 
module mount -- Added navigation blocker for the EasyNav module -- Keeping EasyNav viewport parameters during visualization save -- Installing a vocabulary now fully refreshes the application state -- Workspaces, which are prefix of an other workspace, are now correctly handled +- Security Update of Java wrapper +- Workspace selection resets module selection +- Considering `exploreModuleConfiguration.defaultGraph` during the Explore module mount +- Added navigation blocker for the EasyNav module +- Keeping EasyNav viewport parameters during visualization save +- Installing a vocabulary now fully refreshes the application state +- Workspaces, which are prefix of an other workspace, are now correctly handled ## eccenca DataPlatform v24.1.2 @@ -188,41 +188,41 @@ We're excited to bring you the latest update to DataPlatform v24.1, which introd v24.1.2 of DataPlatform ships the following fixes: -- docker image: bump zlib package to mitigate CVE-2023-45853 +- docker image: bump zlib package to mitigate CVE-2023-45853 v24.1.1 of DataPlatform ships the following fixes: -- GraphDB license endpoints returns an empty value, if the GraphDB free is configured. +- GraphDB license endpoints returns an empty value, if the GraphDB free is configured. v24.1.0 of DataPlatform adds the following new features: -- Add license information to DataPlatform actuator info endpoint response -- Added endpoints for SHACL validation / Resource shaping - - SHACL validation and resource shaping - - endpoints for validation, node shape structure views and data retrieval - - SHACL batch validation - - added application property `scheduler.backgroundQueryPoolSize` (Default: 4) - - maximum numbers of threads for background jobs (i.e. 
SHACL batch validation) - - added application property `proxy.shaclBatchResultsMemoryBoundaryInMb` (Default: 100) - - amount in Megabytes (Mb) for SHACL batch validation results kept in memory for status retrieval -- Access condition review endpoint - - ability to check user rights (access conditions) for a set of groups +- Add license information to DataPlatform actuator info endpoint response +- Added endpoints for SHACL validation / Resource shaping + - SHACL validation and resource shaping + - endpoints for validation, node shape structure views and data retrieval + - SHACL batch validation + - added application property `scheduler.backgroundQueryPoolSize` (Default: 4) + - maximum numbers of threads for background jobs (i.e. SHACL batch validation) + - added application property `proxy.shaclBatchResultsMemoryBoundaryInMb` (Default: 100) + - amount in Megabytes (Mb) for SHACL batch validation results kept in memory for status retrieval +- Access condition review endpoint + - ability to check user rights (access conditions) for a set of groups v24.1.0 of DataPlatform ships the following changes: -- Static access condition prefix split for newly created access conditions - - `` – prefix for Access Condition Groups / Users - - `` – prefix for Access Conditions -- Added tracing id to audit logs -- Add feature flag field to workspace configuration -- Add support for GraphDB 10.5 +- Static access condition prefix split for newly created access conditions + - `` – prefix for Access Condition Groups / Users + - `` – prefix for Access Conditions +- Added tracing id to audit logs +- Add feature flag field to workspace configuration +- Add support for GraphDB 10.5 v24.1.0 of DataPlatform ships the following fixes: -- Allow blank nodes in update queries -- API endpoints do not return `null` values for unset fields anymore -- Correct documentation of API endpoints for named query execution -- Default language order is changed to: `["en", "", "de"]` +- Allow blank nodes in 
update queries +- API endpoints do not return `null` values for unset fields anymore +- Correct documentation of API endpoints for named query execution +- Default language order is changed to: `["en", "", "de"]` ## eccenca Corporate Memory Control (cmemc) v24.1.4 @@ -230,55 +230,55 @@ We're excited to bring you the latest update to Corporate Memory Control (cmemc) v24.1.4 of Corporate Memory Control (cmemc) ships the following fixes: -- restore python 3.10 compatibility +- restore python 3.10 compatibility v24.1.3 of Corporate Memory Control (cmemc) was a redacted build. v24.1.2 of Corporate Memory Control (cmemc) ships the following security patches: -- docker image: bump zlib1g to 1.3.dfsg+really1.3.1-1 to mitigate CVE-2023-45853 +- docker image: bump zlib1g to 1.3.dfsg+really1.3.1-1 to mitigate CVE-2023-45853 v24.1.1 of Corporate Memory Control (cmemc) ships the following fixes: -- In case of using env-only configuration + SSL_VERIFY=false - - InsecureRequestWarning output from urllib3 is now suppressed - - Normal user warning is given to stderr -- `admin workspace python install` command - - completion of plugin packages does not list non-plugin packages anymore +- In case of using env-only configuration + SSL_VERIFY=false + - InsecureRequestWarning output from urllib3 is now suppressed + - Normal user warning is given to stderr +- `admin workspace python install` command + - completion of plugin packages does not list non-plugin packages anymore v24.1.1 of Corporate Memory Control (cmemc) ships the following security updates: -- docker image: upgrade zlib package to 1:1.3.dfsg-3 in order to mitigate CVE-2023-45853 +- docker image: upgrade zlib package to 1:1.3.dfsg-3 in order to mitigate CVE-2023-45853 v24.1.0 of Corporate Memory Control (cmemc) adds the following new features: -- Added support for importing vocabulary from standard input (`stdin`) -- `admin acl` command group - - `create` command - Create an access condition - - `delete` command - Delete 
access conditions - - `inspect` command - Inspects the access condition - - `list` command - List all access conditions - - `review` command - Reviews the graph rights for a given access condition - - `update` command - Updates an access condition -- `graph validation` command group - - `execute` command - Start a validation process - - `inspect` command - Inspect validation process results - - `list` command - List validation processes - - `cancel` command - Cancel a running validation process -- `admin user list` command - - `--filter` option - filter user list -- `admin status` command - - raises an error if the Corporate Memory license is expired (grace period) - - raises a warning if the GraphDB license expires in less than one month -- `dataset create` command - - support to use JSON Lines files as JSON datasets - - support to use YAML files as TEXT datasets +- Added support for importing vocabulary from standard input (`stdin`) +- `admin acl` command group + - `create` command - Create an access condition + - `delete` command - Delete access conditions + - `inspect` command - Inspects the access condition + - `list` command - List all access conditions + - `review` command - Reviews the graph rights for a given access condition + - `update` command - Updates an access condition +- `graph validation` command group + - `execute` command - Start a validation process + - `inspect` command - Inspect validation process results + - `list` command - List validation processes + - `cancel` command - Cancel a running validation process +- `admin user list` command + - `--filter` option - filter user list +- `admin status` command + - raises an error if the Corporate Memory license is expired (grace period) + - raises a warning if the GraphDB license expires in less than one month +- `dataset create` command + - support to use JSON Lines files as JSON datasets + - support to use YAML files as TEXT datasets v24.1.0 of Corporate Memory Control (cmemc) ships the following 
changes: -- `graph import` command - - importing a directory to a single graph no longer raises an error but imports all turtle files to this graph -- docker image: python 3.11.8 +- `graph import` command + - importing a directory to a single graph no longer raises an error but imports all turtle files to this graph +- docker image: python 3.11.8 ## Migration Notes @@ -294,7 +294,7 @@ v24.1.0 of Corporate Memory Control (cmemc) ships the following changes: There is a known issue and existing workaround with the new dependency port feature: you may receive a message like this when running your workflows: -``` +```txt Workflow Execution Error: Not all workflow nodes were executed! Executed 2 of 7 nodes. ``` @@ -308,7 +308,7 @@ After saving it will work again. If the default workspace sets a `exploreModuleConfiguration.defaultGraph` but an additional workspace is configured to show graph lists, you might see the following error message: -``` +```txt Missing Graph configuration for Context. Please check that the graph "" actually exists. diff --git a/docs/release-notes/corporate-memory-24-2/index.md b/docs/release-notes/corporate-memory-24-2/index.md index 73e675526..b0d3285e9 100644 --- a/docs/release-notes/corporate-memory-24-2/index.md +++ b/docs/release-notes/corporate-memory-24-2/index.md @@ -12,26 +12,26 @@ Corporate Memory 24.2.1 is the first patch release in the 24.2 release line. The highlights of this release are: -- Explore and Author: - - All-new, re-written shacl custom UI rendering engine (shacl2) is now generally available and the system default - - Support for Sankey chart type. -- Build: - - Quick creation of file based datasets in the workflow editor - dropping files into the workflow editor will automatically create a new dataset. 
-- Automate: - - Extension to many **import commands** to allow for importing graphs, projects, datasets and vocabularies from the web - - Extension to the **graph validation export** command to produce JUnit XML reports for better integration into CI/CD pipelines +- Explore and Author: + - All-new, re-written shacl custom UI rendering engine (shacl2) is now generally available and the system default + - Support for Sankey chart type. +- Build: + - Quick creation of file based datasets in the workflow editor - dropping files into the workflow editor will automatically create a new dataset. +- Automate: + - Extension to many **import commands** to allow for importing graphs, projects, datasets and vocabularies from the web + - Extension to the **graph validation export** command to produce JUnit XML reports for better integration into CI/CD pipelines This release delivers the following component versions: -- eccenca DataIntegration v24.2.1 -- eccenca DataManager v24.2.2 -- eccenca DataPlatform v24.2.1 -- eccenca Corporate Memory Control (cmemc) v24.2.0 +- eccenca DataIntegration v24.2.1 +- eccenca DataManager v24.2.2 +- eccenca DataPlatform v24.2.1 +- eccenca Corporate Memory Control (cmemc) v24.2.0 We tested this release with the following dependency components: -- Ontotext GraphDB v10.7.2 -- Keycloak v25.0.6 +- Ontotext GraphDB v10.7.2 +- Keycloak v25.0.6 More detailed information for this release is provided in the next sections. @@ -41,55 +41,55 @@ We're excited to bring you the latest update to DataIntegration v24.2, which int v24.2.1 of DataIntegration ships the following fixes: -- Drag and drop in react flow editors used in Linking and Transform tasks work again. -- Dragging operators in the react flow based editors when text is selected leads to large artifacts. +- Drag and drop in react flow editors used in Linking and Transform tasks work again. +- Dragging operators in the react flow based editors when text is selected leads to large artifacts. 
v24.2.0 of DataIntegration adds the following new features: -- Quick creation of file based datasets in the workflow editor. - - Dropping files into the workflow editor will automatically create a new dataset. -- Button to reload all cache activities at once. -- The JDBC dataset can now be configured how it will write multiple values for a single property. A new strategy allows to write multiple rows in this case. -- Python: - - Added `WorkflowContext` that allows plugins to access the workflow identifier as well as the current execution status. - - Added `packageName` attribute to the plugin JSON. +- Quick creation of file based datasets in the workflow editor. + - Dropping files into the workflow editor will automatically create a new dataset. +- Button to reload all cache activities at once. +- The JDBC dataset can now be configured how it will write multiple values for a single property. A new strategy allows to write multiple rows in this case. +- Python: + - Added `WorkflowContext` that allows plugins to access the workflow identifier as well as the current execution status. + - Added `packageName` attribute to the plugin JSON. v24.2.0 of DataIntegration introduces the following changes: -- Re-use original parameters of a replaceable dataset if the dataset type matches with the requested one in a variable workflow request. -- Support Turtle files for the graph file upload operator (No chunking supported) -- Transform object header has been separated from its properties to improve visual appearance. -- Rule editors: Add generic path operator to input path tabs. -- When trying to execute an unsaved workflow notify the user that the workflow will be saved with the option to not show the dialog again. -- Re-added reload button to data preview to get updated content on config and data changes. -- Removed URL resource manager. URLs as file names won't be resolved anymore, e.g. for dataset inputs. 
-- The RDF datasets will always write the schema type for each entity. +- Re-use original parameters of a replaceable dataset if the dataset type matches with the requested one in a variable workflow request. +- Support Turtle files for the graph file upload operator (No chunking supported) +- Transform object header has been separated from its properties to improve visual appearance. +- Rule editors: Add generic path operator to input path tabs. +- When trying to execute an unsaved workflow notify the user that the workflow will be saved with the option to not show the dialog again. +- Re-added reload button to data preview to get updated content on config and data changes. +- Removed URL resource manager. URLs as file names won't be resolved anymore, e.g. for dataset inputs. +- The RDF datasets will always write the schema type for each entity. v24.2.0 of DataIntegration ships the following fixes: -- Workflow Task: exception message prefixed with wrong name. -- Workflow editor: - - Workflow nodes with only a single dependency output might be executed twice. - - Loose connections from dependency ports not working anymore. - - Allow dependency connection from/to replaceable datasets. - - Drawing dependency connections from input dependency port results in unexpected connections, e.g. connections to data output ports. -- Project page breaks if file resources are missing meta data like size or modified. -- Workflow operator: became invisible after workflow error. -- Always return a 401 (not authorized) instead of a 500 response when refreshing a token has failed with an `invalid_grant` error from Keycloak. -- Rule endpoint does not return new parameters after plugin upgrade. -- Zip file created with macOS Archive Utility containing files with macOS-specific metadata not working in bulk datasets. -- Fixed `overlayEditors` that close even with unsaved changes. -- Consistent navigation behavior or indicate links/buttons that open a new tab/window. 
-- Replace `highlightedState` properties in workflow editor. -- Rule endpoint does not return new parameters after plugin upgrade. -- Python: Changes in plugin submodule not recognized without restarting DI. -- Dataset API is not fenced against misuse of file parameter. -- Workflow Task: exception message prefixed with wrong name. -- Improve upload icon in DI file upload widget. -- JDBC: H2 driver not found. -- JDBC: Dataset should not write an extra `rdf_type` column. -- JDBC: Cannot rerun Data preview without reopening JDBC dataset update dialog. -- JDBC: naming the URI column `uri` causes `is ambiguous` error. +- Workflow Task: exception message prefixed with wrong name. +- Workflow editor: + - Workflow nodes with only a single dependency output might be executed twice. + - Loose connections from dependency ports not working anymore. + - Allow dependency connection from/to replaceable datasets. + - Drawing dependency connections from input dependency port results in unexpected connections, e.g. connections to data output ports. +- Project page breaks if file resources are missing meta data like size or modified. +- Workflow operator: became invisible after workflow error. +- Always return a 401 (not authorized) instead of a 500 response when refreshing a token has failed with an `invalid_grant` error from Keycloak. +- Rule endpoint does not return new parameters after plugin upgrade. +- Zip file created with macOS Archive Utility containing files with macOS-specific metadata not working in bulk datasets. +- Fixed `overlayEditors` that close even with unsaved changes. +- Consistent navigation behavior or indicate links/buttons that open a new tab/window. +- Replace `highlightedState` properties in workflow editor. +- Rule endpoint does not return new parameters after plugin upgrade. +- Python: Changes in plugin submodule not recognized without restarting DI. +- Dataset API is not fenced against misuse of file parameter. 
+- Workflow Task: exception message prefixed with wrong name. +- Improve upload icon in DI file upload widget. +- JDBC: H2 driver not found. +- JDBC: Dataset should not write an extra `rdf_type` column. +- JDBC: Cannot rerun Data preview without reopening JDBC dataset update dialog. +- JDBC: naming the URI column `uri` causes `is ambiguous` error. ## eccenca DataManager v24.2.2 @@ -97,74 +97,74 @@ We are excited to announce the latest update to DataManager v24.2, which introdu v24.2.2 of DataManager ships the following fixes: -- Fixed the statistics display for link rules with inverted properties +- Fixed the statistics display for link rules with inverted properties v24.2.1 of DataManager ships the following fixes: -- Fixed _Create ``_-Button in explore -- Select NodeShapes according to their `sh:order` -- Resolve NodeShapes sequentially, instead of parallel -- Node selection clears its state when deleting nodes -- Easynav: non-saved values disappear after search value change -- Resource list is updated after the creation of a new resource -- Newly created relations based on inverted shapes are pointing in the right direction -- Setting language for text areas +- Fixed _Create ``_-Button in explore +- Select NodeShapes according to their `sh:order` +- Resolve NodeShapes sequentially, instead of parallel +- Node selection clears its state when deleting nodes +- Easynav: non-saved values disappear after search value change +- Resource list is updated after the creation of a new resource +- Newly created relations based on inverted shapes are pointing in the right direction +- Setting language for text areas v24.2.0 of DataManager adds the following new features: -- _Charts module_ - - Support for grouping chart series. - - Support for Sankey chart type. -- _Business Knowledge Editor - EasyNav_ - - Easynav allows to create new, directly connected resources with the via the browse dialogue. - - Improved undo/redo functionality. 
- - Improved internal structure and robustness. - - Improved selection and searching. - - Adding new connections via shacl shape queries. - - Adding new connections on unsaved nodes. - - Unique Visualization names are enforced. -- _Other_ - - Added a global notifications queue with the dropdown menu besides the user menu. - - Update Spring Wrapper to Spring Boot 3.2. - - Added icons for read-only graphs. - - Added "algorithm" param in workspace configuration and `/proxy/:id/resource` queries. - - Added a notification feature for retrieving the query catalog data when the backend response contains validation errors with the code "DOUBLE_TRIPLE". - - Added the multi-source turtle component. +- _Charts module_ + - Support for grouping chart series. + - Support for Sankey chart type. +- _Business Knowledge Editor - EasyNav_ + - Easynav allows to create new, directly connected resources with the via the browse dialogue. + - Improved undo/redo functionality. + - Improved internal structure and robustness. + - Improved selection and searching. + - Adding new connections via shacl shape queries. + - Adding new connections on unsaved nodes. + - Unique Visualization names are enforced. +- _Other_ + - Added a global notifications queue with the dropdown menu besides the user menu. + - Update Spring Wrapper to Spring Boot 3.2. + - Added icons for read-only graphs. + - Added "algorithm" param in workspace configuration and `/proxy/:id/resource` queries. + - Added a notification feature for retrieving the query catalog data when the backend response contains validation errors with the code "DOUBLE_TRIPLE". + - Added the multi-source turtle component. v24.2.0 of DataManager ships the following changes: -- _SHACL Component_ - SHACL2 replaces now our default SHACL viewer/editor and brings in lot of new features and enhancements. +- _SHACL Component_ - SHACL2 replaces now our default SHACL viewer/editor and brings in lot of new features and enhancements. 
Note: this component was already introduced in v24.1, so this list is not exhaustive. - - Improved Validation of inputs. - - Added the possibility to add properties that are not visible to shacl2. - - Migrated annotations to shacl2. - - Value and UI Query are used in relation manager, if provided. - - Added check for simple widget using `defaultResourceViewerIri`. - - Improved editor for highly connected resources. - - Improved access condition handling when creating new resources. - - Improved default language handling. - - Improved Layout for long labels. - - Partial support for qualified value shapes. - - Sticky toolbar for better usability. -- _Access Conditions_ - New Access conditions management interface replaces the former component. - - Validation for the creation items, added links to grid items. - - AC review page. + - Improved Validation of inputs. + - Added the possibility to add properties that are not visible to shacl2. + - Migrated annotations to shacl2. + - Value and UI Query are used in relation manager, if provided. + - Added check for simple widget using `defaultResourceViewerIri`. + - Improved editor for highly connected resources. + - Improved access condition handling when creating new resources. + - Improved default language handling. + - Improved Layout for long labels. + - Partial support for qualified value shapes. + - Sticky toolbar for better usability. +- _Access Conditions_ - New Access conditions management interface replaces the former component. + - Validation for the creation items, added links to grid items. + - AC review page. v24.2.0 of DataManager ships the following fixes: -- _Error messages_ - - Improved error messages format, now they are more informative and user-friendly with title and details sections. - - Warnings are shown locally where triggered and errors are added to the global notifications queue. - - Warnings are shown properly without breaking the UI, closer to the place that triggered it. 
-- Workflow triggers reload the page after the workflow is finished. -- Resource tags are links. -- _Explore_ - - Navigation component honors the module settings for navigationItemsPerPage. - - Navigation component shows correct pagination for search. - - Turtle tab is visible, even if the user has no write access. -- _Business Knowledge Editor - EasyNav_ - - Inverse properties are shown in the correct direction. - - Labels with more than approx. 24 characters and no white space are now correctly split into two lines. +- _Error messages_ + - Improved error messages format, now they are more informative and user-friendly with title and details sections. + - Warnings are shown locally where triggered and errors are added to the global notifications queue. + - Warnings are shown properly without breaking the UI, closer to the place that triggered it. +- Workflow triggers reload the page after the workflow is finished. +- Resource tags are links. +- _Explore_ + - Navigation component honors the module settings for navigationItemsPerPage. + - Navigation component shows correct pagination for search. + - Turtle tab is visible, even if the user has no write access. +- _Business Knowledge Editor - EasyNav_ + - Inverse properties are shown in the correct direction. + - Labels with more than approx. 24 characters and no white space are now correctly split into two lines. 
## eccenca DataPlatform v24.2.1 @@ -172,60 +172,60 @@ We're excited to bring you the latest update to DataPlatform v24.2, which introd v24.2.1 of DataPlatform ships the following fixes: -- Backup archives are zipped with ZIP64 option to allow >=4G archives -- Prevent issues with long-running shacl batch jobs on single node deployments -- Include subclasses in validation of `sh:class` -- Improved validation of file uploads +- Backup archives are zipped with ZIP64 option to allow >=4G archives +- Prevent issues with long-running shacl batch jobs on single node deployments +- Include subclasses in validation of `sh:class` +- Improved validation of file uploads v24.2.0 of DataPlatform adds the following new features: -- Added migration of workspace configuration - - workspace/module configurations are stored in JSON content literals in the CMEM Config Graph since v23.1. - - New field in workspace configuration graph. - - Endpoint for migrating workspace configurations to current version. - - Extension of actuator info endpoint: Shows current version and number of items to migrate. -- Added SHACL RDF validation view for SHACL batch validations, `POST /api/shacl/validation/batches` - - `validationResultsTargetGraph`: Graph to write rdf validation model into after batch finishes - - `replace`: boolean value on whether to replace the graph (default: false) -- Added option for SHACL Batch run to query target resources with a ignore list for OWL imports, `POST /api/shacl/validation/batches` +- Added migration of workspace configuration + - workspace/module configurations are stored in JSON content literals in the CMEM Config Graph since v23.1. + - New field in workspace configuration graph. + - Endpoint for migrating workspace configurations to current version. + - Extension of actuator info endpoint: Shows current version and number of items to migrate. 
+- Added SHACL RDF validation view for SHACL batch validations, `POST /api/shacl/validation/batches` + - `validationResultsTargetGraph`: Graph to write rdf validation model into after batch finishes + - `replace`: boolean value on whether to replace the graph (default: false) +- Added option for SHACL Batch run to query target resources with an ignore list for OWL imports, `POST /api/shacl/validation/batches` - `owlImportsIgnoreList`: A set of graph IRIs which are not queried in the resource selection (i.e. owl imports ignored) -- Added module Access-Control to workspace configuration - - Existing module Administration split into workspace configuration and access control. - - Existing administration module used for workspace configuration (as to avoid migration steps). -- Add alternative endpoint to `/api/shapes/list` called `/api/shapes/listWithValidation` - - return object contains a field for errors in the data which prevent mapping. 
+ - asked behavior results in shape not found responses on shapes which cannot be mapped +- Add `owlImportsResolution` to resource store endpoints + - optional parameter overriding dataplatform setting v24.2.0 of DataPlatform ships the following changes: -- Update to Apache Jena 5 - - All JSON-LD output in JSON-LD 1.1, JSON-LD 1.0 support dropped -- Changed to Access Condition endpoints - - `GET /api/authorization/groups` return IRIs instead of names (including public/admin group) - - `GET /api/authorization splits` pageable parameter into single parameters page, size, sort -- Deactivated graph db change tracking as default -- Extension of ACL review endpoint, response updated with matching access conditions -- Additional SHUI-Property for defining object relation default view - - `shui:viewResourcesWithWidget` with values (`shui:ComplexResourceViewerWidget`, `shui:SimpleResourceViewerWidget`) -- Resource API endpoints `/proxy/{id}/resource` changed - - Additional application parameter proxy.maxCBDStatements (default: 1000000) for limiting amount of statements in memory when loading (S)CBD - - CBD calculation does not include reifications anymore - - Additional one query based algorithm for CBD calculation (algorithm can be selected with optional query parameter algorithm) - - workspace configuration: `apiConfiguration.conciseBoundLoadAlgorithm` (`ITERATIVE`, `QUERY`) -- Endpoint for workspace configuration set `/api/conf/workspaces` falls back on system default -- New default icons for resource, class and properties -- Removal of native stardog integration +- Update to Apache Jena 5 + - All JSON-LD output in JSON-LD 1.1, JSON-LD 1.0 support dropped +- Changed to Access Condition endpoints + - `GET /api/authorization/groups` return IRIs instead of names (including public/admin group) + - `GET /api/authorization splits` pageable parameter into single parameters page, size, sort +- Deactivated graph db change tracking as default +- Extension of ACL review endpoint, 
response updated with matching access conditions +- Additional SHUI-Property for defining object relation default view + - `shui:viewResourcesWithWidget` with values (`shui:ComplexResourceViewerWidget`, `shui:SimpleResourceViewerWidget`) +- Resource API endpoints `/proxy/{id}/resource` changed + - Additional application parameter proxy.maxCBDStatements (default: 1000000) for limiting amount of statements in memory when loading (S)CBD + - CBD calculation does not include reifications anymore + - Additional one query based algorithm for CBD calculation (algorithm can be selected with optional query parameter algorithm) + - workspace configuration: `apiConfiguration.conciseBoundLoadAlgorithm` (`ITERATIVE`, `QUERY`) +- Endpoint for workspace configuration set `/api/conf/workspaces` falls back on system default +- New default icons for resource, class and properties +- Removal of native stardog integration v24.2.0 of DataPlatform ships the following fixes: -- Broken workspace configurations fall back to system default workspace - - actuator info endpoint contains field `workspaceConfigurationError` on error -- SHACL controller validation endpoint fixed - - only validates constraints which do not need other data if change-set is given as in memory -- Fixed query rewriting of (named) graphs for users with limited read rights - - From Graphs Rewriting: Prevent rewrite of where clause if no graph variable found +- Broken workspace configurations fall back to system default workspace + - actuator info endpoint contains field `workspaceConfigurationError` on error +- SHACL controller validation endpoint fixed + - only validates constraints which do not need other data if change-set is given as in memory +- Fixed query rewriting of (named) graphs for users with limited read rights + - From Graphs Rewriting: Prevent rewrite of where clause if no graph variable found ## eccenca Corporate Memory Control (cmemc) v24.2.0 @@ -233,50 +233,50 @@ We're excited to bring you the latest 
update to Corporate Memory Control (cmemc) v24.2.0 of Corporate Memory Control (cmemc) adds the following new features: -- `admin store migrate` command - - Migrate configuration resources to the current version. -- `admin status` command -- - will warn in case there a workspace configurations, which can be migrated -- - will exit with exit code 1 in case option `--exit-1 always` is given and migratable workspaces are found -- `graph validation export` command - - export validation reports as JSON or jUnit XML -- `graph import` command - - support for importing graphs from remote HTTP/HTTPS locations -- `project import` command - - support for importing project zip files from remote HTTP/HTTPS locations -- `dataset create` command - - support for creation of resource file from remote HTTP/HTTPS locations -- `dataset upload` command - - support for uploading of resource file from remote HTTP/HTTPS locations -- `vocabulary import` command - - support for importing vocabulary from remote HTTP/HTTPS locations -- `smart_path` package as a replacement for `pathlib.Path` and expanded functionality to support both local file paths and remote file paths -- `ClickSmartPath` parameter type, extending `click.path` to accommodate remote files -- `graph validation execute` command group - - option `--query` to allow specifying a select query for resource selection. - - option `--ignore-graph` to provide multiple graph IRIs to be excluded from the resource selection. - - option `--result-graph` to specifies the graph where the validation results will be written. - - option `--replace` to replace the result graph with new validation results +- `admin store migrate` command + - Migrate configuration resources to the current version. 
+- `admin status` command + - will warn in case there are workspace configurations, which can be migrated + - will exit with exit code 1 in case option `--exit-1 always` is given and migratable workspaces are found +- `graph validation export` command + - export validation reports as JSON or jUnit XML +- `graph import` command + - support for importing graphs from remote HTTP/HTTPS locations +- `project import` command + - support for importing project zip files from remote HTTP/HTTPS locations +- `dataset create` command + - support for creation of resource file from remote HTTP/HTTPS locations +- `dataset upload` command + - support for uploading of resource file from remote HTTP/HTTPS locations +- `vocabulary import` command + - support for importing vocabulary from remote HTTP/HTTPS locations +- `smart_path` package as a replacement for `pathlib.Path` and expanded functionality to support both local file paths and remote file paths +- `ClickSmartPath` parameter type, extending `click.path` to accommodate remote files +- `graph validation execute` command group + - option `--query` to allow specifying a select query for resource selection. + - option `--ignore-graph` to provide multiple graph IRIs to be excluded from the resource selection. + - option `--result-graph` to specify the graph where the validation results will be written. 
+ - option `--replace` to replace the result graph with new validation results v24.2.0 of Corporate Memory Control (cmemc) ships the following fixes: -- `graph import` command - - importing a directory to a single graph no longer raises an error but imports all turtle files to this graph -- `admin workspace python install` command - - report errors from update_plugins API -- using not existing configurations (`-c` / `--configuration`) now results in a proper error message -- `workflow io` command - - can now generate ttl output files -- `admin workspace python list` command - - listing of published packages with the `--available` option now works for more than 19 packages -- `graph export` command - - newly created directories have correct access conditions now -- `vocabulary install` command - - raise proper usage error messages -- `vocabulary uninstall` command - - raise proper usage error messages -- `admin store export` command - - validates the exported zip and raises an error in case of a corrupted ZIP export +- `graph import` command + - importing a directory to a single graph no longer raises an error but imports all turtle files to this graph +- `admin workspace python install` command + - report errors from update_plugins API +- using not existing configurations (`-c` / `--configuration`) now results in a proper error message +- `workflow io` command + - can now generate ttl output files +- `admin workspace python list` command + - listing of published packages with the `--available` option now works for more than 19 packages +- `graph export` command + - newly created directories have correct access conditions now +- `vocabulary install` command + - raise proper usage error messages +- `vocabulary uninstall` command + - raise proper usage error messages +- `admin store export` command + - validates the exported zip and raises an error in case of a corrupted ZIP export ## Migration Notes @@ -308,20 +308,19 @@ 
jdbc:mariadb://:/?sessionVariables=sql_mode=ANSI ### DataPlatform -- Due to the update to Apache Jena 5 all JSON-LD output now conforms to JSON-LD 1.1 (was JSON-LD 1.0): - - JSON-LD 1.1 has been designed as a superset of 1.0, so we do not expect any problems, just making you aware that there _might_ be differences when using it with JSON-LD 1.0 and 1.1 processors at the same time (e.g. to compare results) +- Due to the update to Apache Jena 5 all JSON-LD output now conforms to JSON-LD 1.1 (was JSON-LD 1.0): + - JSON-LD 1.1 has been designed as a superset of 1.0, so we do not expect any problems, just making you aware that there _might_ be differences when using it with JSON-LD 1.0 and 1.1 processors at the same time (e.g. to compare results) > JSON-LD 1.1 introduces new features that are compatible with JSON-LD 1.0, but if processed by a JSON-LD 1.0 processor may produce different results (cf. [JSON-LD 1.1 Framing](https://www.w3.org/TR/json-ld11-framing/#features)) - - Affected endpoints: - - `POST /proxy/{id}/resource/framed` - - `POST /proxy/{id}/sparql/framed` - - `POST /authorization/conditions/framed` - - `POST+GET /api/queries/jsonld/perform` -- The Access Condition endpoint `GET /api/authorization/groups` returns now IRIs instead of names (including public/admin group). -- Resource API endpoints `/proxy/{id}/resource` changed, the CBD calculation does not include reifications anymore. -- The native stardog integration has been removed. As a stardog user you need to migrate your store configuration to use the generic HTTP-Store configuration option. + - Affected endpoints: + - `POST /proxy/{id}/resource/framed` + - `POST /proxy/{id}/sparql/framed` + - `POST /authorization/conditions/framed` + - `POST+GET /api/queries/jsonld/perform` +- The Access Condition endpoint `GET /api/authorization/groups` returns now IRIs instead of names (including public/admin group). 
+- Resource API endpoints `/proxy/{id}/resource` changed, the CBD calculation does not include reifications anymore. +- The native stardog integration has been removed. As a stardog user you need to migrate your store configuration to use the generic HTTP-Store configuration option. ### cmemc - The [`admin status`](../../automate/cmemc-command-line-interface/command-reference/admin/index.md#admin-status) command in combination with the `--exit-1 always` option now exits with status code 1 in the additional case that migrate-able workspace configurations are found - - To avoid this, you can automatically migrate the configurations with the [`admin store migrate`](../../automate/cmemc-command-line-interface/command-reference/admin/store/index.md#admin-store-migrate) command. - + - To avoid this, you can automatically migrate the configurations with the [`admin store migrate`](../../automate/cmemc-command-line-interface/command-reference/admin/store/index.md#admin-store-migrate) command. diff --git a/docs/release-notes/corporate-memory-24-3/index.md b/docs/release-notes/corporate-memory-24-3/index.md index bd11bf973..8de9c28db 100644 --- a/docs/release-notes/corporate-memory-24-3/index.md +++ b/docs/release-notes/corporate-memory-24-3/index.md @@ -12,12 +12,12 @@ Corporate Memory 24.3.2 is the third major release in 2024. The highlights of this release are: -- Explore and Author: - - New shacl2 engine now used in the Business Knowledge Editor sidebar for a whole new experience when viewing and editing node details. -- Build: - - Workflow reports now show a preview of the output entities being produced, allowing quick review and verification of the underlying workflow tasks.. -- Automate: - - The `cmem` command group `admin migration`, which adds various migration recipes to make it easier to upgrade to new versions of Corporate Memory. 
+- Explore and Author: + - New shacl2 engine now used in the Business Knowledge Editor sidebar for a whole new experience when viewing and editing node details. +- Build: + - Workflow reports now show a preview of the output entities being produced, allowing quick review and verification of the underlying workflow tasks. +- Automate: + - The `cmem` command group `admin migration`, which adds various migration recipes to make it easier to upgrade to new versions of Corporate Memory. !!! info inline end "Important info" @@ -25,14 +25,14 @@ The highlights of this release are: This release delivers the following component versions: -- eccenca DataIntegration v24.3.1 -- eccenca Explore v24.3.0 (formerly DataPlatform and DataManager) -- eccenca Corporate Memory Control (cmemc) v24.3.3 +- eccenca DataIntegration v24.3.1 +- eccenca Explore v24.3.0 (formerly DataPlatform and DataManager) +- eccenca Corporate Memory Control (cmemc) v24.3.3 We tested this release with the following dependency components: -- Ontotext GraphDB v10.8.3 -- Keycloak v25.0.6 +- Ontotext GraphDB v10.8.3 +- Keycloak v25.0.6 More detailed information for this release is provided in the next sections. @@ -42,52 +42,52 @@ We're excited to bring you the latest update to DataIntegration v24.3, which int **v24.3.1 of DataIntegration adds the following new features:** -- Added download button to workflow report tab. +- Added download button to workflow report tab. **v24.3.1 of DataIntegration ships the following fixes:** -- Task descriptions with long strings do not lead to horizontal scroll bars. -- Tag search might very shortly show old search suggestions. -- Missing unit for _Matching timeout_. -- _Regex selection_ transformer has mis-formatted documentation. -- Added JDBC dataset documentation on how to configure ANSI quotes for MySQL. -- Superfluous `CREATE SILENT GRAPH` leads to slow update performance. -- Fix SQL editor inputs. 
+- Task descriptions with long strings do not lead to horizontal scroll bars. +- Tag search might very shortly show old search suggestions. +- Missing unit for _Matching timeout_. +- _Regex selection_ transformer has mis-formatted documentation. +- Added JDBC dataset documentation on how to configure ANSI quotes for MySQL. +- Superfluous `CREATE SILENT GRAPH` leads to slow update performance. +- Fix SQL editor inputs. **v24.3.0 of DataIntegration adds the following new features:** -- Workspace search: - - Support to filter workflows that contain replaceable datasets. - - Display tags on workflow search items when they contain replaceable datasets. - - Add file name and graph URIs to search items as searchable tags. -- Workflow editor: - - Support creating knowledge graph datasets from DataPlatform graphs matching the search query. - - Added copy prefixes option in copy task dialog. -- Integration of a Prometheus endpoint to expose many useful metrics. -- Transform operators to retrieve attributes from input tasks: - - _Input Task attributes_ retrieves individual attributes from the input task (such as the modified date) or the entire task as JSON. - - _Input file attributes_ retrieves a metadata attribute from the input file (such as the file name). -- JdbcDialect implementation for Trino: Fixes STRING type mapping, adds isolationLevel option to avoid Connections resetting AutoCommit mode and serves as example for the dialect concept. -- File hash transformer: - - Calculates the hash sum of a given file - - Works on either the input file dataset or a selected file from the project -- JSON special paths: - - `#propertyName` accesses the current object key - - `*` selects all direct children of the current token -- Add link from a task parameter description into the task's Markdown documentation for this parameter, if available. -- Show sample (output) entities for workflow operators in the workflow reports. -- Text dataset allows to configure the zip regex. 
-- Support setting the locale for the `Parse date pattern` and `Parse date` transform operators. - - `*` selects all direct children of the current token -- More fine-grained access control: - - In addition to a base action, it is possible to specify as many specific actions that protect specific endpoints. - - Endpoints are configured in a whitelist as URI prefixes per specific action. - - All endpoints that are protected by any specific action cannot be accessed anymore via the base action. - - Two new actions are configured by default and protect the Python plugin management and specific workspace API endpoints. See changes and migrations. -- Global variables can be marked sensitive for storing passwords: - - Sensitive variables can only be used in password fields. - - Using sensitive variables in other fields or in variable templates fails and does not expose the value. - - Example: +- Workspace search: + - Support to filter workflows that contain replaceable datasets. + - Display tags on workflow search items when they contain replaceable datasets. + - Add file name and graph URIs to search items as searchable tags. +- Workflow editor: + - Support creating knowledge graph datasets from DataPlatform graphs matching the search query. + - Added copy prefixes option in copy task dialog. +- Integration of a Prometheus endpoint to expose many useful metrics. +- Transform operators to retrieve attributes from input tasks: + - _Input Task attributes_ retrieves individual attributes from the input task (such as the modified date) or the entire task as JSON. + - _Input file attributes_ retrieves a metadata attribute from the input file (such as the file name). +- JdbcDialect implementation for Trino: Fixes STRING type mapping, adds isolationLevel option to avoid Connections resetting AutoCommit mode and serves as example for the dialect concept. 
+- File hash transformer: + - Calculates the hash sum of a given file + - Works on either the input file dataset or a selected file from the project +- JSON special paths: + - `#propertyName` accesses the current object key + - `*` selects all direct children of the current token +- Add link from a task parameter description into the task's Markdown documentation for this parameter, if available. +- Show sample (output) entities for workflow operators in the workflow reports. +- Text dataset allows to configure the zip regex. +- Support setting the locale for the `Parse date pattern` and `Parse date` transform operators. + - `*` selects all direct children of the current token +- More fine-grained access control: + - In addition to a base action, it is possible to specify as many specific actions that protect specific endpoints. + - Endpoints are configured in a whitelist as URI prefixes per specific action. + - All endpoints that are protected by any specific action cannot be accessed anymore via the base action. + - Two new actions are configured by default and protect the Python plugin management and specific workspace API endpoints. See changes and migrations. +- Global variables can be marked sensitive for storing passwords: + - Sensitive variables can only be used in password fields. + - Using sensitive variables in other fields or in variable templates fails and does not expose the value. + - Example: ```conf config.variables = { @@ -100,80 +100,80 @@ We're excited to bring you the latest update to DataIntegration v24.3, which int } ``` -- Delete project files operator: Allows to delete project files in a workflow based on a regex. -- Added Snowflake dataset type. +- Delete project files operator: Allows to delete project files in a workflow based on a regex. +- Added Snowflake dataset type. **v24.3.0 of DataIntegration introduces the following changes:** -- Optimized writing to Neo4j, resulting in a 25x speed improvement. -- Upgraded Spark to 3.5.3. 
-- Upgraded to typescript version 5.5.3. -- After saving a workflow the undo/redo queues are cleared which is consistent with other editors in DI/DM. -- Renamed DI action from `urn:eccenca:di` to ``. -- Line breaks are forced for evaluation preview tooltips. -- If a project is copied to another project, all referenced project variables and their dependent variables are copied to the target project as well. -- docker image: switch to `eclipse-temurin:17-ubi9-minimal` base image -- Prefix handling: - - Only prefixes added to a specific project are serialized/exported, no prefixes loaded by the workspace (e.g. from DP). - - Only load user prefixes and prefixes of installed vocabularies from DP into DI. -- All datasets that support zips can be written now. -- Increase visibility of breadcrumbs in application header. -- Configurable Favicon in DataIntegration. +- Optimized writing to Neo4j, resulting in a 25x speed improvement. +- Upgraded Spark to 3.5.3. +- Upgraded to typescript version 5.5.3. +- After saving a workflow the undo/redo queues are cleared which is consistent with other editors in DI/DM. +- Renamed DI action from `urn:eccenca:di` to ``. +- Line breaks are forced for evaluation preview tooltips. +- If a project is copied to another project, all referenced project variables and their dependent variables are copied to the target project as well. +- docker image: switch to `eclipse-temurin:17-ubi9-minimal` base image +- Prefix handling: + - Only prefixes added to a specific project are serialized/exported, no prefixes loaded by the workspace (e.g. from DP). + - Only load user prefixes and prefixes of installed vocabularies from DP into DI. +- All datasets that support zips can be written now. +- Increase visibility of breadcrumbs in application header. +- Configurable Favicon in DataIntegration. **v24.3.0 of DataIntegration ships the following fixes:** -- Jinja templates can lead to OutOfMemory issues. -- Loading of JDBC Type 4 Drivers from Jar at runtime. 
-- Add add-opens JDK option to sbt parameters to avoid Serialization errors in executors. -- User defined function removed to prevent startup error in local dev mode. -- After saving a workflow the workflow editor can be closed without warning of unsaved changes. -- Race condition in Excel map transformer cache. -- Remote Client-Side Code Execution through CSV Injection identified in penetration testing. -- CSV datasets should not be cleared at the beginning of a workflow since they are overwritten anyway. -- Ports of datasets are shown as required in workflow validation, but are not. -- In workspace/project item search disable Enter behavior while a search is pending. -- Use correct icons for copy/clone actions. -- Workflow editor: - - Workflow is not re-validated after undo/redo operations. - - Re-configuring a workflow node to not having a data output is not immediately visible (only after reload). - - When the `Create new dataset` operator is used it always creates a _dataset_ even though the item type was changed. - - Caches of file base datasets are not refreshed when updated via file download operator. - - Dependency ports checkbox does not show checkmark in workflow tasks with unconnected output port. - - Fix text on node menu options that have a checkbox. Always show the _enabled_ text. -- REST task: - - When paging is enabled and entities are output only the last request result is output. - - Add TLSv1.3 support. -- Hierarchical mapping editor: Entity relationship direction input does not show current selection. -- Transform rule editor: - - Validation errors are not shown when starting the evaluation. - - Notifications are not correctly cleared and shown. -- Transform execution report: - - Type URI validation issues are not shown in the transform execution report. - - Rule tree in transform execution report and evaluation tab has a broken collapse/expand state. -- Password parameter templates are empty initially. 
-- Fix issues in create/update dialog: - - Depending input gets disabled if dependent input has an empty default value. - - Data preview of dataset with nested parameters is not working. -- Task config preview has a different parameter ordering than in the create/update dialog. -- Evaluation of a text path of a text dataset in a rule editor fails. -- Cannot execute SPARQL update queries with parameter templates. -- `Evaluate template` operator: Changed project variable not updated without evaluating transform. -- Jinja interpreter does not clear previous errors. -- Process of opening and closing the handle tools menu. -- Manually defined project prefixes are automatically copied to other projects after reload. -- Removing a vocabulary does not remove the vocabulary prefix from the DI projects. -- Cannot reconfigure parameter values with templates in workflows. -- Workflow report shows multiple executions of some operators even though they were only executed once. -- Python Workflow status incorrect. -- Python Workflow operators could not be cancelled in some cases. -- Alignment dataset should support the clear method so it can be used in workflows. -- Drop zone in workflow editor freezes sometimes after dropping an operator. -- Transform/Linking operator's 'Restriction' documentation is incorrectly formatted. -- DI project "Items per page" cuts off "100" as "1...". -- Wide task descriptions are not nicely scrollable. -- Inline documentation of `Clean HTML` is incomplete/wrong. -- Cannot delete mapping rule target type anymore. -- SPARQL Construct task does not update its execution report. +- Jinja templates can lead to OutOfMemory issues. +- Loading of JDBC Type 4 Drivers from Jar at runtime. +- Add add-opens JDK option to sbt parameters to avoid Serialization errors in executors. +- User defined function removed to prevent startup error in local dev mode. +- After saving a workflow the workflow editor can be closed without warning of unsaved changes. 
+- Race condition in Excel map transformer cache. +- Remote Client-Side Code Execution through CSV Injection identified in penetration testing. +- CSV datasets should not be cleared at the beginning of a workflow since they are overwritten anyway. +- Ports of datasets are shown as required in workflow validation, but are not. +- In workspace/project item search disable Enter behavior while a search is pending. +- Use correct icons for copy/clone actions. +- Workflow editor: + - Workflow is not re-validated after undo/redo operations. + - Re-configuring a workflow node to not having a data output is not immediately visible (only after reload). + - When the `Create new dataset` operator is used it always creates a _dataset_ even though the item type was changed. + - Caches of file base datasets are not refreshed when updated via file download operator. + - Dependency ports checkbox does not show checkmark in workflow tasks with unconnected output port. + - Fix text on node menu options that have a checkbox. Always show the _enabled_ text. +- REST task: + - When paging is enabled and entities are output only the last request result is output. + - Add TLSv1.3 support. +- Hierarchical mapping editor: Entity relationship direction input does not show current selection. +- Transform rule editor: + - Validation errors are not shown when starting the evaluation. + - Notifications are not correctly cleared and shown. +- Transform execution report: + - Type URI validation issues are not shown in the transform execution report. + - Rule tree in transform execution report and evaluation tab has a broken collapse/expand state. +- Password parameter templates are empty initially. +- Fix issues in create/update dialog: + - Depending input gets disabled if dependent input has an empty default value. + - Data preview of dataset with nested parameters is not working. +- Task config preview has a different parameter ordering than in the create/update dialog. 
+- Evaluation of a text path of a text dataset in a rule editor fails. +- Cannot execute SPARQL update queries with parameter templates. +- `Evaluate template` operator: Changed project variable not updated without evaluating transform. +- Jinja interpreter does not clear previous errors. +- Process of opening and closing the handle tools menu. +- Manually defined project prefixes are automatically copied to other projects after reload. +- Removing a vocabulary does not remove the vocabulary prefix from the DI projects. +- Cannot reconfigure parameter values with templates in workflows. +- Workflow report shows multiple executions of some operators even though they were only executed once. +- Python Workflow status incorrect. +- Python Workflow operators could not be cancelled in some cases. +- Alignment dataset should support the clear method so it can be used in workflows. +- Drop zone in workflow editor freezes sometimes after dropping an operator. +- Transform/Linking operator's 'Restriction' documentation is incorrectly formatted. +- DI project "Items per page" cuts off "100" as "1...". +- Wide task descriptions are not nicely scrollable. +- Inline documentation of `Clean HTML` is incomplete/wrong. +- Cannot delete mapping rule target type anymore. +- SPARQL Construct task does not update its execution report. 
## eccenca Explore v24.3.0 @@ -186,122 +186,122 @@ We are excited to announce Explore v24.3, which introduces new features, improve **v24.3.0 of Explore adds the following new features:** -- Help system - - Implemented a renovated help system with global and local context of documentation -- BusinessKnowledgeEditor (BKE) - - Rename "EasyNav" to "Business Knowledge Editor" - - Keep search bar state when visualization is saved - - Set BKE as default - - Creation of customizable class on a property shape path - - Edge type selection shows shape description on hover - - Keep search bar state when visualization is saved -- Notifications - - Added a warning message, if a user is part of a fallback admin group -- Query module - - Icons added to the query dropdown functionalities -- Access Condition - - Provided custom search function for graphs in ACDetails -- SHACL - - Creation of customizable class on a property shape path - - Workflows are triggered upon editing the resource -- Workspace configuration - - Added a support for `GRAPH` placeholder in the `navigationSearchQuery` -- Other - - Added endpoint for resolving node shapes of a resource evaluating target class only for explore - - Added flag to `/userinfo` response if user is root user - - Added support for multiline in turtle editor - - Added actuator proxy endpoint for GraphDB actuators - - hidden endpoints under `/dataplatform/actuator/proxy/graphdb/**` - - Added support for gzip payload compression in SPARQL Graph Store endpoints - - Content-Encoding / Accept-Encoding used with value gzip - - Added simple zip-bomb check for gzipped content - - Configuration: `proxy.gspUploadGzipContentLimit` sets limit in bytes of uncompressed graph file in gzip (default 5 GB) - - Added endpoint for retrieval of resource descriptions (i.e. 
rdfs:comment) - - signature same as for title resolving - - Added additional prometheus endpoint under different port and no authentication - - Configuration under deploy - - `deploy.additional-prometheus-endpoint.enabled` (default: false) - - `deploy.additional-prometheus-endpoint.port` (default: 9091) - - `deploy.additional-prometheus-endpoint.context` (default: /metrics) +- Help system + - Implemented a renovated help system with global and local context of documentation +- BusinessKnowledgeEditor (BKE) + - Rename "EasyNav" to "Business Knowledge Editor" + - Keep search bar state when visualization is saved + - Set BKE as default + - Creation of customizable class on a property shape path + - Edge type selection shows shape description on hover + - Keep search bar state when visualization is saved +- Notifications + - Added a warning message, if a user is part of a fallback admin group +- Query module + - Icons added to the query dropdown functionalities +- Access Condition + - Provided custom search function for graphs in ACDetails +- SHACL + - Creation of customizable class on a property shape path + - Workflows are triggered upon editing the resource +- Workspace configuration + - Added a support for `GRAPH` placeholder in the `navigationSearchQuery` +- Other + - Added endpoint for resolving node shapes of a resource evaluating target class only for explore + - Added flag to `/userinfo` response if user is root user + - Added support for multiline in turtle editor + - Added actuator proxy endpoint for GraphDB actuators + - hidden endpoints under `/dataplatform/actuator/proxy/graphdb/**` + - Added support for gzip payload compression in SPARQL Graph Store endpoints + - Content-Encoding / Accept-Encoding used with value gzip + - Added simple zip-bomb check for gzipped content + - Configuration: `proxy.gspUploadGzipContentLimit` sets limit in bytes of uncompressed graph file in gzip (default 5 GB) + - Added endpoint for retrieval of resource descriptions (i.e. 
rdfs:comment) + - signature same as for title resolving + - Added additional prometheus endpoint under different port and no authentication + - Configuration under deploy + - `deploy.additional-prometheus-endpoint.enabled` (default: false) + - `deploy.additional-prometheus-endpoint.port` (default: 9091) + - `deploy.additional-prometheus-endpoint.context` (default: /metrics) **v24.3.0 of Explore ships the following changes:** -- EasyNav - - Created a fallback module, marked as deprecated - - Used the old EP for saving data in easynav -- Charts Module - - Sunburst chart in the Explore module - `Statistics` tab reimplemented with ECharts -- CodeMirror editor - - Replaced the library for the common usage, added linters for the Editor -- SHACL - - Split `ShaclContextProvider` into controlled and uncontrolled versions to maintain changes from outer component - - Hardcoded descriptions endpoint replaced with a proper one from DP - - Added information about different validation types to the validation control -- `RDFResourceLinkRule` Component - - Renamed to `RDFResourceTag`, added the titles query for cases where only the item resource is provided, added the `RTKLoadingErrorElement` wrapper -- Thesaurus - - Translate the Thesaurus Module to our modern UI Stack -- Other - - docker image: switch to `eclipse-temurin:17-ubi9-minimal` base image - - Library Updates - - Spring Boot 3.3 - - Apache Jena 5.2.0 - - Removed access conditions from bootstrap data - - Add username to unauthorized graph access error in log - - Replace account information placeholders in customized queries - - Renamed `{{username}}` placeholder in the _GraphTemplateJinjaTemplate_ to `{{shuiAccountName}}` - - Renamed `{{username}}` SPARQL Query placeholder (available in `onDeleteUpdate`, `onInsertUpdate`, `onUpdateUpdate`, `shui:uiQuery` and `shui:valueQuery`) to `{{shuiAccount}}` - - Changed integration of non-validating property shapes in SHACL node shape model - - Added concept of widget integration 
linked to node shape which have basic SHACL Properties for form UI - - Label, Description, Order, Group, link to widget - - Widget integrations carry one of the types Workflow Trigger, Table Report or Chart - - Deprecated / Removed link from node shape to chart i.e. node shape charts are not possible anymore - - Always check GraphDb license information on `/actuator/info` call - - Changed retrieval of installed vocabulary prefixes - - Actuator info endpoint secured - - Change class hierarchy resolving to SPARQL property path instead of recursion - - Removed `ValueView` and `ValueEdit` components from resource view components group +- EasyNav + - Created a fallback module, marked as deprecated + - Used the old EP for saving data in easynav +- Charts Module + - Sunburst chart in the Explore module - `Statistics` tab reimplemented with ECharts +- CodeMirror editor + - Replaced the library for the common usage, added linters for the Editor +- SHACL + - Split `ShaclContextProvider` into controlled and uncontrolled versions to maintain changes from outer component + - Hardcoded descriptions endpoint replaced with a proper one from DP + - Added information about different validation types to the validation control +- `RDFResourceLinkRule` Component + - Renamed to `RDFResourceTag`, added the titles query for cases where only the item resource is provided, added the `RTKLoadingErrorElement` wrapper +- Thesaurus + - Translate the Thesaurus Module to our modern UI Stack +- Other + - docker image: switch to `eclipse-temurin:17-ubi9-minimal` base image + - Library Updates + - Spring Boot 3.3 + - Apache Jena 5.2.0 + - Removed access conditions from bootstrap data + - Add username to unauthorized graph access error in log + - Replace account information placeholders in customized queries + - Renamed `{{username}}` placeholder in the _GraphTemplateJinjaTemplate_ to `{{shuiAccountName}}` + - Renamed `{{username}}` SPARQL Query placeholder (available in `onDeleteUpdate`, 
`onInsertUpdate`, `onUpdateUpdate`, `shui:uiQuery` and `shui:valueQuery`) to `{{shuiAccount}}` + - Changed integration of non-validating property shapes in SHACL node shape model + - Added concept of widget integration linked to node shape which have basic SHACL Properties for form UI + - Label, Description, Order, Group, link to widget + - Widget integrations carry one of the types Workflow Trigger, Table Report or Chart + - Deprecated / Removed link from node shape to chart i.e. node shape charts are not possible anymore + - Always check GraphDb license information on `/actuator/info` call + - Changed retrieval of installed vocabulary prefixes + - Actuator info endpoint secured + - Change class hierarchy resolving to SPARQL property path instead of recursion + - Removed `ValueView` and `ValueEdit` components from resource view components group **v24.3.0 of Explore ships the following fixes:** -- BusinessKnowledgeEditor (BKE) - - Disabled creation of new resources via `shui:denyNewResources` property - - Added functionality to delete a resource to the node panel - - Show a notification in case of viewing details of an unshaped node - - Changed the save request payload to include a separate change for each node shape - - Added missed "remove from the canvas" functionality - - Set correct node shapes order - - Prevent Modal key event propagation - - Create new node shows node shapes instead of classes -- Turtle editor - - Cursor prevented from jumping upon error -- `MultiSourceView` Component - - Not imported warning displaying - - Prevent loosing state while navigation is triggered - - Starting with a blank resource gives a blank screen -- SHACL - - Source link from the validation log points to the correct graph - - Node shape description toggle shows markdown both when collapsed and opened - - Slow request getting the resources per node shape is replaced with the more performant one - - Tooltip on the resource list is shown correctly - - Adjusted "Add resource" 
disable state for simple and complex widgets - - `sh:name` shows as property shape title instead of using title helper in the dropdown - - Depictions are shown based on the vocab `foaf:depiction` property and no longer for a specific property shape - - Fetching of property values is now done purely based on pre-parameterized SPARQL queries - - Changed replacement of SHUI `{{username}}` to `{{shuiAccountName}}` - - Replace SHACL Save API - - Also use `?_graph` variables for deleting when using a value query - - Display custom `sh:message` in validation results - - Expose `sh:name` of node shape in SHACL as primary name - `rdfs:label` fallback -- Charts - - Info notification shows when query results are empty - - Context graph is conveyed to the charts query replacement -- Image widget - - The check for image widget is replaced with the regex -- Other - - Notifications: Improved rendering behavior, prevent loops - - Fixed the statistics display for link rules with inverted properties - - Exit application with code 1 on expired license +- BusinessKnowledgeEditor (BKE) + - Disabled creation of new resources via `shui:denyNewResources` property + - Added functionality to delete a resource to the node panel + - Show a notification in case of viewing details of an unshaped node + - Changed the save request payload to include a separate change for each node shape + - Added missed "remove from the canvas" functionality + - Set correct node shapes order + - Prevent Modal key event propagation + - Create new node shows node shapes instead of classes +- Turtle editor + - Cursor prevented from jumping upon error +- `MultiSourceView` Component + - Not imported warning displaying + - Prevent losing state while navigation is triggered + - Starting with a blank resource gives a blank screen +- SHACL + - Source link from the validation log points to the correct graph + - Node shape description toggle shows markdown both when collapsed and opened + - Slow request getting the 
resources per node shape is replaced with the more performant one + - Tooltip on the resource list is shown correctly + - Adjusted "Add resource" disable state for simple and complex widgets + - `sh:name` shows as property shape title instead of using title helper in the dropdown + - Depictions are shown based on the vocab `foaf:depiction` property and no longer for a specific property shape + - Fetching of property values is now done purely based on pre-parameterized SPARQL queries + - Changed replacement of SHUI `{{username}}` to `{{shuiAccountName}}` + - Replace SHACL Save API + - Also use `?_graph` variables for deleting when using a value query + - Display custom `sh:message` in validation results + - Expose `sh:name` of node shape in SHACL as primary name - `rdfs:label` fallback +- Charts + - Info notification shows when query results are empty + - Context graph is conveyed to the charts query replacement +- Image widget + - The check for image widget is replaced with the regex +- Other + - Notifications: Improved rendering behavior, prevent loops + - Fixed the statistics display for link rules with inverted properties + - Exit application with code 1 on expired license ## eccenca Corporate Memory Control (cmemc) v24.3.3 @@ -309,78 +309,78 @@ We're excited to bring you the latest update to Corporate Memory Control (cmemc) **v24.3.3 of cmemc provides the following fixes:** -- add missing migration recipe for deprecated SPARQL datatypes +- add missing migration recipe for deprecated SPARQL datatypes **v24.3.2 of cmemc provides the following fixes:** -- remove accidentally added pip dependency +- remove accidentally added pip dependency **v24.3.1 of cmemc provides the following fixes:** -- `graph import` command - - use python stdlib instead rdflib to guess mime types (lower memory footprint) +- `graph import` command + - use python stdlib instead of rdflib to guess mime types (lower memory footprint) **v24.3.0 of cmemc adds the following new features:** -- `graph 
validation execute` command - - `--inspect` option to return the list of violations instead of the summary (includes `--wait`) -- `graph validation inpect` command - - retrieval and display of titles as terminal links for resources - - completion: retrieval and display of titles as descriptions -- `graph validation list` command - - retrieval and display of titles as terminal links for graphs -- `graph export` command - - option `--compress` to generate compressed ttl file -- `graph import` command - - support import of compressed ttl/nt files -- `admin store export` command - - `--replace` option to replace an existing file - - if no BACKUP_FILE is given, a default of `{{date}}-{{connection}}.store.zip` is used -- `project import` command - - `--replace` option to replace an existing project -- `project export` command - - `--replace` option to replace an existing file -- `admin workspace export` - - `--replace` option to replace an existing file -- `admin metrics` command group - - support for build / data integration metrics, e.g. `build:cmem_workspace_task_spec_size` - - support for GraphDB store metrics, e.g. 
`store:graphdb_slow_queries_count` -- `admin metrics list` command - - documentation column to output table - - `--filter` option to filter metrics table by job, name, ID, or type -- `admin acl` command group - - support for updated 24.3 access condition vocabulary and ACL graph -- `admin migration` command group - - `admin migration list` command - List migration recipes - - `admin migration execute` command - Execute needed migration recipes - - The following migration recipes are available: - - `bootstrap-data` - Re-import bootstrap system data to match current version - - `workspace-configurations` - Forward-upgrade explore workspace configurations - - `acl-graph-24.3` - Move access conditions and used queries to new ACL graph - - `acl-vocab-24.3` - Migrate auth vocabulary terms (actions and other grants) - - `chart-widgets-24.3` - Migrate Chart Property Shapes to Widget Integrations - - `workflow-trigger-widgets-24.3` - Migrate Workflow Trigger Property Shapes to Widget Integrations +- `graph validation execute` command + - `--inspect` option to return the list of violations instead of the summary (includes `--wait`) +- `graph validation inspect` command + - retrieval and display of titles as terminal links for resources + - completion: retrieval and display of titles as descriptions +- `graph validation list` command + - retrieval and display of titles as terminal links for graphs +- `graph export` command + - option `--compress` to generate compressed ttl file +- `graph import` command + - support import of compressed ttl/nt files +- `admin store export` command + - `--replace` option to replace an existing file + - if no BACKUP_FILE is given, a default of `{{date}}-{{connection}}.store.zip` is used +- `project import` command + - `--replace` option to replace an existing project +- `project export` command + - `--replace` option to replace an existing file +- `admin workspace export` + - `--replace` option to replace an existing file +- `admin metrics` 
command group + - support for build / data integration metrics, e.g. `build:cmem_workspace_task_spec_size` + - support for GraphDB store metrics, e.g. `store:graphdb_slow_queries_count` +- `admin metrics list` command + - documentation column to output table + - `--filter` option to filter metrics table by job, name, ID, or type +- `admin acl` command group + - support for updated 24.3 access condition vocabulary and ACL graph +- `admin migration` command group + - `admin migration list` command - List migration recipes + - `admin migration execute` command - Execute needed migration recipes + - The following migration recipes are available: + - `bootstrap-data` - Re-import bootstrap system data to match current version + - `workspace-configurations` - Forward-upgrade explore workspace configurations + - `acl-graph-24.3` - Move access conditions and used queries to new ACL graph + - `acl-vocab-24.3` - Migrate auth vocabulary terms (actions and other grants) + - `chart-widgets-24.3` - Migrate Chart Property Shapes to Widget Integrations + - `workflow-trigger-widgets-24.3` - Migrate Workflow Trigger Property Shapes to Widget Integrations **In addition to that, these changes and fixes are included:** -- cmemc will not fail anymore when the config dir is not creatable (message in debug) -- cmemc will not fail anymore when the config ini is not readable (message in debug) -- For these commands `admin acl list`, `dataset list`, `graph list`, `project list`, `admin user list`, `project variable list`, `vocabulary list`, `workflow list`, `admin workspace python list`, `admin workspace python list-plugins`, `dataset resource list`, `workflow scheduler list`, and `vocabulary cache list`: - - ommit empty tables with usage note message -- `admin status` command - - component name change: DI -> BUILD - - component name change: DP -> EXPLORE - - component removal: DM (merged with DP into EXPLORE) - - key prefix change: dp -> explore - - key prefix change: di -> build -- `project 
export` command - - `--filename-template` completion examples adaption -- `dataset create` command - - Support compressed zip files for dataset types including CSV, XML, JSON, YAML, and plain text. -- `admin metrics` command group - - metrics identification now as combined ID of `job_id:metrics_name` -- `admin metrics` command group - - `--job` option, use `--filter job job_id` or combined metrics ID instead +- cmemc will not fail anymore when the config dir is not creatable (message in debug) +- cmemc will not fail anymore when the config ini is not readable (message in debug) +- For these commands `admin acl list`, `dataset list`, `graph list`, `project list`, `admin user list`, `project variable list`, `vocabulary list`, `workflow list`, `admin workspace python list`, `admin workspace python list-plugins`, `dataset resource list`, `workflow scheduler list`, and `vocabulary cache list`: + - omit empty tables with usage note message +- `admin status` command + - component name change: DI -> BUILD + - component name change: DP -> EXPLORE + - component removal: DM (merged with DP into EXPLORE) + - key prefix change: dp -> explore + - key prefix change: di -> build +- `project export` command + - `--filename-template` completion examples adaptation +- `dataset create` command + - Support compressed zip files for dataset types including CSV, XML, JSON, YAML, and plain text. +- `admin metrics` command group + - metrics identification now as combined ID of `job_id:metrics_name` +- `admin metrics` command group + - `--job` option, use `--filter job job_id` or combined metrics ID instead ## Migration Notes @@ -394,12 +394,12 @@ We're excited to bring you the latest update to Corporate Memory Control (cmemc) ### eccenca DataIntegration -- CSV files are no longer deleted by default at the beginning of a workflow execution. This behavior can be changed in the CSV dataset configuration. -- Access control changes. 
Action URIs have been renamed and new actions are introduced by default: - - `urn:eccenca:di` -> `` (will be handled by `cmemc admin migration`, see below). - - `urn:elds-backend-all-actions` -> `` (will be handled by `cmemc admin migration`, see below). - - Python plugin management endpoints are now secured via `` action. - - Workspace admin functions (reload workspace, import workspace) are now secured via `` action. +- CSV files are no longer deleted by default at the beginning of a workflow execution. This behavior can be changed in the CSV dataset configuration. +- Access control changes. Action URIs have been renamed and new actions are introduced by default: + - `urn:eccenca:di` -> `` (will be handled by `cmemc admin migration`, see below). + - `urn:elds-backend-all-actions` -> `` (will be handled by `cmemc admin migration`, see below). + - Python plugin management endpoints are now secured via `` action. + - Workspace admin functions (reload workspace, import workspace) are now secured via `` action. ### eccenca Explore @@ -420,17 +420,17 @@ Run all migrations: `cmemc admin migration execute --all`. 
### eccenca Corporate Memory Control (cmemc) -- All scripts which used the `admin status` command with the `--key` option: - - adapt the key prefixes accordingly: - - old: `cmemc admin status --key dp.info.license.validDate` - - new: `cmemc admin status --key explore.info.license.validDate` -- `admin store migrate` command deprecated - - use the `admin migration` command group instead -- `--overwrite` options deprecated - will be removed with the next major version - - affected commands: - - `project import` command - - `project export` command - - `admin workspace export` command -- All scripts which used the `admin metrics` command group: - - use combined metrics ID of `job_id:metrics_name` - - use `--filter job job_id` instead of `--job job_id` +- All scripts which used the `admin status` command with the `--key` option: + - adapt the key prefixes accordingly: + - old: `cmemc admin status --key dp.info.license.validDate` + - new: `cmemc admin status --key explore.info.license.validDate` +- `admin store migrate` command deprecated + - use the `admin migration` command group instead +- `--overwrite` options deprecated - will be removed with the next major version + - affected commands: + - `project import` command + - `project export` command + - `admin workspace export` command +- All scripts which used the `admin metrics` command group: + - use combined metrics ID of `job_id:metrics_name` + - use `--filter job job_id` instead of `--job job_id` diff --git a/docs/release-notes/corporate-memory-25-1/index.md b/docs/release-notes/corporate-memory-25-1/index.md index 7c207eebf..113123cba 100644 --- a/docs/release-notes/corporate-memory-25-1/index.md +++ b/docs/release-notes/corporate-memory-25-1/index.md @@ -12,17 +12,17 @@ Corporate Memory 25.1 is the first major release in 2025. 
The highlights of this release are: -- Build: **Seamless Workflow Integration** - - Directly connecting datasets with explicit schemas to workflow operators simplifies data ingestion and processing, allowing users to quickly incorporate CSV and text data into their workflows. +- Build: **Seamless Workflow Integration** + - Directly connecting datasets with explicit schemas to workflow operators simplifies data ingestion and processing, allowing users to quickly incorporate CSV and text data into their workflows. -- Build: **Improved Rule Editing Experience** - - Enhanced copy & paste functionality in rule editors boosts productivity by making it easier to manage and edit rules accurately and efficiently. +- Build: **Improved Rule Editing Experience** + - Enhanced copy & paste functionality in rule editors boosts productivity by making it easier to manage and edit rules accurately and efficiently. -- Explore and Autor: **Streamlined Shape Management** - - The introduction of new SHACL shape quick-access options empowers users to effortlessly build, validate, and troubleshoot complex shape configuration. +- Explore and Author: **Streamlined Shape Management** + - The introduction of new SHACL shape quick-access options empowers users to effortlessly build, validate, and troubleshoot complex shape configuration. -- Automate: **Lightning-fast Parameterized Queries** - - The new `cmemc` query placeholder specifications enable super-fast execution of parameterized queries by running background value queries to provide dynamic completions, significantly enhancing data query responsiveness. +- Automate: **Lightning-fast Parameterized Queries** + - The new `cmemc` query placeholder specifications enable super-fast execution of parameterized queries by running background value queries to provide dynamic completions, significantly enhancing data query responsiveness. !!! 
info inline end "Important info" @@ -30,14 +30,14 @@ The highlights of this release are: This release delivers the following component versions: -- eccenca DataIntegration v25.1.1 -- eccenca Explore v25.1.2 (formerly DataPlatform and DataManager) -- eccenca Corporate Memory Control (cmemc) v25.1.1 +- eccenca DataIntegration v25.1.1 +- eccenca Explore v25.1.2 (formerly DataPlatform and DataManager) +- eccenca Corporate Memory Control (cmemc) v25.1.1 We tested this release with the following dependency components: -- Ontotext GraphDB v10.8.3 -- Keycloak v25.0.6 +- Ontotext GraphDB v10.8.3 +- Keycloak v25.0.6 More detailed information for this release is provided in the next sections. @@ -48,46 +48,46 @@ We're excited to bring you the latest update to DataIntegration v25.1, which int **v25.1.1 of DataIntegration ships the following fixes and additions:** - OIDC - - added support for Request Party-Initiated Logout as specified by OpenID Connect + - added support for Relying Party-Initiated Logout as specified by OpenID Connect - S3 - - Now defaults to the AWS credentials provider chain if S3 access and secret keys are not configured. + - Now defaults to the AWS credentials provider chain if S3 access and secret keys are not configured. - Python - - Prevented redundant re-imports of already loaded modules. - - Updated `FileEntitySchema` in `cmem-plugin-base` for improved compatibility with datasets. + - Prevented redundant re-imports of already loaded modules. + - Updated `FileEntitySchema` in `cmem-plugin-base` for improved compatibility with datasets. - RegexExtractionTransformer - - Resolved an issue that incorrectly generated null values. + - Resolved an issue that incorrectly generated null values. **v25.1.0 of DataIntegration adds the following new features:** -- Use colors for workbench tags. -- Added a new operator for concatenating input values into a file. -- Enabled copy & paste functionality in rule editors. 
-- Datasets with explicit schemas can now be directly connected to workflow operators. - - Supported for CSV and text datasets. - - If a supported dataset is connected to a workflow operator with a flexible input schema, the entire dataset (i.e., all properties of its primary type) is read. - - For CSV datasets, this results in entities being read with all columns included. -- Allow changing the width of blocks in the mapping editor. +- Use colors for workbench tags. +- Added a new operator for concatenating input values into a file. +- Enabled copy & paste functionality in rule editors. +- Datasets with explicit schemas can now be directly connected to workflow operators. + - Supported for CSV and text datasets. + - If a supported dataset is connected to a workflow operator with a flexible input schema, the entire dataset (i.e., all properties of its primary type) is read. + - For CSV datasets, this results in entities being read with all columns included. +- Allow changing the width of blocks in the mapping editor. **v25.1.0 of DataIntegration introduces the following changes:** -- Invisible parameters are now part of the config port schema. -- Improved file names for downloaded projects and workspaces. -- SPARQL results are streamed as JSON instead of XML. -- The root breadcrumb and the _Build_ logo in the navigation sidebar now direct to the _projects_ search facet instead of _All types_. +- Invisible parameters are now part of the config port schema. +- Improved file names for downloaded projects and workspaces. +- SPARQL results are streamed as JSON instead of XML. +- The root breadcrumb and the _Build_ logo in the navigation sidebar now direct to the _projects_ search facet instead of _All types_. **v25.1.0 of DataIntegration ships the following fixes:** -- Fixed URI rule evaluation failure for empty object mappings. -- No duplicate JDBC jar configuration is required anymore. -- Fixed issue with JSON datasets not always navigating into arrays. 
-- Fixed issue where direct transform execution does not use project variables. -- Fixed Transform Evaluation failure when a rule contains a template transformer. -- Fixed issue where URI pattern input sometimes resets to its initial value or crashes the mapping editor. -- Fixed issue where SPARQL restriction expands the wrong SPARQL pattern when using property paths with prefixed names. -- Fixed RDF file upload issue. -- Fixed issue where the reference entities cache fails to load a large number of entities from the RDF store. -- Fixed issue where tasks created in the workflow editor are not added to the recently viewed list. -- Fixed issue where adding a note to a linking rule fails to save. +- Fixed URI rule evaluation failure for empty object mappings. +- No duplicate JDBC jar configuration is required anymore. +- Fixed issue with JSON datasets not always navigating into arrays. +- Fixed issue where direct transform execution does not use project variables. +- Fixed Transform Evaluation failure when a rule contains a template transformer. +- Fixed issue where URI pattern input sometimes resets to its initial value or crashes the mapping editor. +- Fixed issue where SPARQL restriction expands the wrong SPARQL pattern when using property paths with prefixed names. +- Fixed RDF file upload issue. +- Fixed issue where the reference entities cache fails to load a large number of entities from the RDF store. +- Fixed issue where tasks created in the workflow editor are not added to the recently viewed list. +- Fixed issue where adding a note to a linking rule fails to save. 
## eccenca Explore v25.1.2 @@ -100,79 +100,79 @@ We are excited to announce Explore v25.1, which introduces new features, improve **v25.1.2 of Explore ships the following fixes:** -- Spring Boot Gradle plugin patch upgrade to address CVE-2025-31651 -- Consider owl import resolution in QUERY CBD resolution strategy -- Link Rules - Rule Setup: parameters of paths retain values, changes on save are shown, even if requests in the background still run. -- Fix ACL Management rights for writing access conditions +- Spring Boot Gradle plugin patch upgrade to address CVE-2025-31651 +- Consider owl import resolution in QUERY CBD resolution strategy +- Link Rules - Rule Setup: parameters of paths retain values, changes on save are shown, even if requests in the background still run. +- Fix ACL Management rights for writing access conditions **v25.1.1 of Explore ships the following fixes:** -- OIDC - - Add deployment property for post logout redirect uri -- Shacl - - Disable adding properties if the max amount of properties is reached - - Show fields of subshapes -- BKE - - Prevent loading candidates queries for readonly properties - - Switching between nodeshapes +- OIDC + - Add deployment property for post logout redirect uri +- Shacl + - Disable adding properties if the max amount of properties is reached + - Show fields of subshapes +- BKE + - Prevent loading candidates queries for readonly properties + - Switching between nodeshapes **v25.1.0 of Explore adds the following new features:** -- Other - - Added support for Virtuoso 8.3: - - Uses the eccenca Docker image for GitLab CI tests. - - Includes adjustments in the store connection to address specific Virtuoso issues. -- SHACL - - Added a download option for value queries in the complex view. - - Values in the table view are now sorted by IRI by default; this can be overridden by setting `shui:disableDefaultValueSorting true`. - - Added a new SHACL form to the graph creation interface. 
- - Added a debug node shape option for quick access. - - Corrected the display of lists of `xsd:anyURI` literals with long URIs. -- BKE - - Improved whitespace formatting in the BKE dossier. +- Other + - Added support for Virtuoso 8.3: + - Uses the eccenca Docker image for GitLab CI tests. + - Includes adjustments in the store connection to address specific Virtuoso issues. +- SHACL + - Added a download option for value queries in the complex view. + - Values in the table view are now sorted by IRI by default; this can be overridden by setting `shui:disableDefaultValueSorting true`. + - Added a new SHACL form to the graph creation interface. + - Added a debug node shape option for quick access. + - Corrected the display of lists of `xsd:anyURI` literals with long URIs. +- BKE + - Improved whitespace formatting in the BKE dossier. **v25.1.0 of Explore ships the following changes:** -- SHACL - - Conditionally hid the remove, create, and clone buttons. - - Added support for GraphDB 10.8.3. - - Removed quad upload support for GSP and the upload endpoint +- SHACL + - Conditionally hid the remove, create, and clone buttons. + - Added support for GraphDB 10.8.3. + - Removed quad upload support for GSP and the upload endpoint (GSP quads are not supported by stores or are uploaded as triples to a single graph only). -- Link Rules - - Adjusted link rules to use the new ACL API. -- SHACL - - Stabilized the UI during loading. - - Updated graph creation forms to the current SHACL system. -- BKE - - Merged the display of relations when property shape mode is deactivated. - - Saved graph changes while preserving the visualization state. -- Query Module - - Catalogue queries are now deleted using resource deletion (CBD). -- Other - - Switched the backend build system to use _Maven Central_ instead of _Artifactory_, which also removes the blocking Virtuoso dependency. - - Added `POST` endpoints for `GET` data requests that may result in long IRIs. - - Updated to Spring Boot 3.4. 
- - Made Apache Jena SPARQL query result streaming adjustable via the `proxy.proxy-sparql-streaming-format` configuration (default: `XML`; possible values: `JSON`, `XML`). - - Updated Apache Jena to version 5.3.0. - - Implemented a new serialization method for paged responses (currently relevant only for access condition management endpoints/clients). +- Link Rules + - Adjusted link rules to use the new ACL API. +- SHACL + - Stabilized the UI during loading. + - Updated graph creation forms to the current SHACL system. +- BKE + - Merged the display of relations when property shape mode is deactivated. + - Saved graph changes while preserving the visualization state. +- Query Module + - Catalogue queries are now deleted using resource deletion (CBD). +- Other + - Switched the backend build system to use _Maven Central_ instead of _Artifactory_, which also removes the blocking Virtuoso dependency. + - Added `POST` endpoints for `GET` data requests that may result in long IRIs. + - Updated to Spring Boot 3.4. + - Made Apache Jena SPARQL query result streaming adjustable via the `proxy.proxy-sparql-streaming-format` configuration (default: `XML`; possible values: `JSON`, `XML`). + - Updated Apache Jena to version 5.3.0. + - Implemented a new serialization method for paged responses (currently relevant only for access condition management endpoints/clients). **v25.1.0 of Explore ships the following fixes:** -- Other - - Render node shapes without property shapes correctly—that is, display their widgets. - - The root admin username now resolves to the actual account name rather than a fixed `admin`. - - GSP file uploads via multipart requests now allow file suffixes in uppercase. - - Re-enabled Prometheus cache metrics. - - URIs with escaped characters are now preserved. - - Added an indication for broken workspace configurations in the UI. - - Fixed missing translations in messages prompting necessary re-login. 
-- SHACL - - Resolved the `shuiObject` placeholder correctly in custom queries when a resource is created. - - In Shacline, subshapes now have a cutoff of 20; a warning is displayed when the limit is reached. - - Fixed an issue where adding a new subshape caused unwanted duplication of existing subshapes of the same type. - - Subshapes are now removed if their removal is revoked. -- BusinessKnowledgeEditor (BKE) - - Added support for value queries in BKE. +- Other + - Render node shapes without property shapes correctly—that is, display their widgets. + - The root admin username now resolves to the actual account name rather than a fixed `admin`. + - GSP file uploads via multipart requests now allow file suffixes in uppercase. + - Re-enabled Prometheus cache metrics. + - URIs with escaped characters are now preserved. + - Added an indication for broken workspace configurations in the UI. + - Fixed missing translations in messages prompting necessary re-login. +- SHACL + - Resolved the `shuiObject` placeholder correctly in custom queries when a resource is created. + - In Shacline, subshapes now have a cutoff of 20; a warning is displayed when the limit is reached. + - Fixed an issue where adding a new subshape caused unwanted duplication of existing subshapes of the same type. + - Subshapes are now removed if their removal is revoked. +- BusinessKnowledgeEditor (BKE) + - Added support for value queries in BKE. 
## eccenca Corporate Memory Control (cmemc) v25.1.1 @@ -180,40 +180,39 @@ We're excited to bring you the latest update to Corporate Memory Control (cmemc) **v25.1.1 of cmemc introduces the following changes:** -- corrected target versions -- corrected migration target versions +- corrected target versions +- corrected migration target versions **v25.1.0 of cmemc adds the following new features and change behaviour:** -- `query execute` command - - shell completion of placeholder values (using annotated QueryPlaceholder resources) -- `admin workspace python reload` command - - reload / register all installed plugins into the DataIntegration workspace -- `admin workspace python list-plugins` command - - will warn now if plugins are installed but not registered -- `admin migration` command group - - `hide-header-footer-25.1` migration recipe - - Remove triples using deprecated shui:valueQueryHideHeader|Footer terms -- `query execute` command - - in case the user does not request a specific content type, some results are shown as a table (instead of `text/csv`) - - `--accept` option now has completion support -- base command - - `--external-http-timeout` option to specify the timeout for non-CMEM HTTP requests -- configuration via INI config file - - allow debug and proxy settings for a connection - - allow settings in the `DEFAULT` section of the config file for all connections -- change in configuration loading order, to integrate the values from the `DEFAULT` section: - 1. load environment variables into options-dict (click is doing this for `CMEMC_` variables) - 2. load options from command line and overwrite environment (only `CMEMC_` variables) - 3. load `DEFAULT` value keys, but not for keys which are already set (i.e not override cli options or env variables) - 4. load named INI section values (in case there is an INI section given) -> this will not overwrite everything - 5. 
use API defaults if there are not enough config keys (use default `CMEM_BASE_URI`, default `OAUTH_GRANT_TYPE` and default `OAUTH_CLIENT_ID`|`SECRET` if not present) - +- `query execute` command + - shell completion of placeholder values (using annotated QueryPlaceholder resources) +- `admin workspace python reload` command + - reload / register all installed plugins into the DataIntegration workspace +- `admin workspace python list-plugins` command + - will warn now if plugins are installed but not registered +- `admin migration` command group + - `hide-header-footer-25.1` migration recipe + - Remove triples using deprecated shui:valueQueryHideHeader|Footer terms +- `query execute` command + - in case the user does not request a specific content type, some results are shown as a table (instead of `text/csv`) + - `--accept` option now has completion support +- base command + - `--external-http-timeout` option to specify the timeout for non-CMEM HTTP requests +- configuration via INI config file + - allow debug and proxy settings for a connection + - allow settings in the `DEFAULT` section of the config file for all connections +- change in configuration loading order, to integrate the values from the `DEFAULT` section: + 1. load environment variables into options-dict (click is doing this for `CMEMC_` variables) + 2. load options from command line and overwrite environment (only `CMEMC_` variables) + 3. load `DEFAULT` value keys, but not for keys which are already set (i.e not override cli options or env variables) + 4. load named INI section values (in case there is an INI section given) -> this will not overwrite everything + 5. 
use API defaults if there are not enough config keys (use default `CMEM_BASE_URI`, default `OAUTH_GRANT_TYPE` and default `OAUTH_CLIENT_ID`|`SECRET` if not present) **In addition the following changes and fixes are included:** -- `admin workspace python uninstall` command - - shell completion uses correct connection now +- `admin workspace python uninstall` command + - shell completion uses correct connection now ## Migration Notes @@ -232,5 +231,5 @@ We're excited to bring you the latest update to Corporate Memory Control (cmemc) ### eccenca Corporate Memory Control (cmemc) -- `query execute` command - - use `--accept` in case you need explicit CSV output +- `query execute` command + - use `--accept` in case you need explicit CSV output diff --git a/docs/release-notes/corporate-memory-25-2/index.md b/docs/release-notes/corporate-memory-25-2/index.md index b8ea23bc1..9f1fa6ca1 100644 --- a/docs/release-notes/corporate-memory-25-2/index.md +++ b/docs/release-notes/corporate-memory-25-2/index.md @@ -14,17 +14,17 @@ Corporate Memory 25.2 is the second major release in 2025. The highlights of this release are: -- Build: **Enhanced File Management in Workflows** - - New binary file dataset and project file operators enable seamless integration of PDFs, images, and other binary files directly into workflows, streamlining document processing pipelines. +- Build: **Enhanced File Management in Workflows** + - New binary file dataset and project file operators enable seamless integration of PDFs, images, and other binary files directly into workflows, streamlining document processing pipelines. -- Explore: **Dynamic Class and Property Creation** - - Create classes and properties on-the-fly while defining SHACL shapes, dramatically accelerating ontology development and data modeling workflows without context switching. 
+- Explore: **Dynamic Class and Property Creation** + - Create classes and properties on-the-fly while defining SHACL shapes, dramatically accelerating ontology development and data modeling workflows without context switching. -- Explore and Automate: **Multi-Graph Query Management** - - The enhanced query catalog now supports multiple query graphs and arbitrary graph selection, enabling better organization and management of SPARQL queries across different knowledge domains. +- Explore and Automate: **Multi-Graph Query Management** + - The enhanced query catalog now supports multiple query graphs and arbitrary graph selection, enabling better organization and management of SPARQL queries across different knowledge domains. -- Build: **Mapping Creator** (BETA) - - New visual mapping management and GenAI based mapping environment, allowing unparalleled clarity, speed and ease in building and maintaining your mapping rules. +- Build: **Mapping Creator** (BETA) + - New visual mapping management and GenAI based mapping environment, allowing unparalleled clarity, speed and ease in building and maintaining your mapping rules. !!! info inline end "Important info" @@ -32,14 +32,14 @@ The highlights of this release are: This release delivers the following component versions: -- eccenca DataIntegration v25.2.2 -- eccenca Explore v25.2.6 -- eccenca Corporate Memory Control (cmemc) v25.4.0 +- eccenca DataIntegration v25.2.2 +- eccenca Explore v25.2.6 +- eccenca Corporate Memory Control (cmemc) v25.4.0 We tested this release with the following dependency components: -- Ontotext GraphDB v11.0.2 -- Keycloak v25.0.6 +- Ontotext GraphDB v11.0.2 +- Keycloak v25.0.6 More detailed information for this release is provided in the next sections. 
@@ -49,49 +49,49 @@ We are excited to announce the release of DataIntegration v25.2.2, which introdu **v25.2.2 of DataIntegration ships the following fixes and additions:** -- Added optional parameter to **Get project files** operator to set the MIME type of the retrieved files, required for uploading RDF files to a Knowledge Graph dataset +- Added optional parameter to **Get project files** operator to set the MIME type of the retrieved files, required for uploading RDF files to a Knowledge Graph dataset **v25.2.1 of DataIntegration ships the following fixes and additions:** -- Icon button tooltips do not pop up out of control -- Edge markers are displayed in workflow editor -- If a project is imported to the knowledge graph backend, the graph is deleted before import +- Icon button tooltips do not pop up out of control +- Edge markers are displayed in workflow editor +- If a project is imported to the knowledge graph backend, the graph is deleted before import **v25.2.0 of DataIntegration adds the following new features:** -- New operators and dataset for improved file handling in workflows: - - **Add project files** workflow operator - Add files to projects directly from workflows - - **Get project files** workflow operator - Retrieve and process project files within workflow executions - - **Binary file dataset** - Handle binary files (PDF, images, etc.) 
in data integration pipelines -- **Neo4j database configuration** - Added parameter to configure specific databases in Neo4j connections -- **Project variable autocompletion** - All template operators now support autocompletion for project variables -- **Camel case transform operator** - Convert text to camel case format for data standardization -- **Project page URL suffix configuration** - New config key `workbench.project.defaultUrlSuffix` to configure the project page view (defaults to `?itemType=workflow&page=1&limit=10`) -- **Path auto-completion** - Mapping and linking rule editors now feature intelligent path auto-completion like in value mapping forms +- New operators and dataset for improved file handling in workflows: + - **Add project files** workflow operator - Add files to projects directly from workflows + - **Get project files** workflow operator - Retrieve and process project files within workflow executions + - **Binary file dataset** - Handle binary files (PDF, images, etc.) 
in data integration pipelines +- **Neo4j database configuration** - Added parameter to configure specific databases in Neo4j connections +- **Project variable autocompletion** - All template operators now support autocompletion for project variables +- **Camel case transform operator** - Convert text to camel case format for data standardization +- **Project page URL suffix configuration** - New config key `workbench.project.defaultUrlSuffix` to configure the project page view (defaults to `?itemType=workflow&page=1&limit=10`) +- **Path auto-completion** - Mapping and linking rule editors now feature intelligent path auto-completion like in value mapping forms **v25.2.0 of DataIntegration introduces the following changes:** -- **Infrastructure updates:** - - Migrated to Java 21 for improved performance and latest language features - - Updated Docker base image to `eclipse-temurin:21-ubi9-minimal` -- **"Internal dataset (single graph)"** added to plugins to properly display reports using this dataset type -- **Configurable favicon** - Organizations can customize the application favicon -- **JSON dataset improvements:** - - New parameter to control automatic navigation into JSON arrays - - New `#arrayPath` path operator for explicit navigation into JSON arrays (available when automatic JSON array navigation is set to `false`) - - New `#uuid` path operator generates type 3 (name-based) UUIDs from JSON node string representations - - New `#arrayText` path operator for enhanced array value extraction +- **Infrastructure updates:** + - Migrated to Java 21 for improved performance and latest language features + - Updated Docker base image to `eclipse-temurin:21-ubi9-minimal` +- **"Internal dataset (single graph)"** added to plugins to properly display reports using this dataset type +- **Configurable favicon** - Organizations can customize the application favicon +- **JSON dataset improvements:** + - New parameter to control automatic navigation into JSON arrays + - New 
`#arrayPath` path operator for explicit navigation into JSON arrays (available when automatic JSON array navigation is set to `false`) + - New `#uuid` path operator generates type 3 (name-based) UUIDs from JSON node string representations + - New `#arrayText` path operator for enhanced array value extraction **v25.2.0 of DataIntegration ships the following fixes:** -- Fixed queries with ORDER BY clauses in SQL dataset -- Fixed create task dialog focus issues when opened via 'connect to newly created...' menu option -- Fixed errors in Office 365 dataset tests and adapted to Microsoft API changes -- Fixed display issues for workflow reports containing internal datasets -- Fixed drag-and-drop problems when adding operators to nested workflow editors -- Non-printable characters in CSV datasets are now preserved during read/write transformations -- XML datasets now return empty values for empty tags when string values are expected -- Project variable updates now properly use the triggering user's credentials +- Fixed queries with ORDER BY clauses in SQL dataset +- Fixed create task dialog focus issues when opened via 'connect to newly created...' 
menu option +- Fixed errors in Office 365 dataset tests and adapted to Microsoft API changes +- Fixed display issues for workflow reports containing internal datasets +- Fixed drag-and-drop problems when adding operators to nested workflow editors +- Non-printable characters in CSV datasets are now preserved during read/write transformations +- XML datasets now return empty values for empty tags when string values are expected +- Project variable updates now properly use the triggering user's credentials ## eccenca Explore v25.2.6 @@ -99,81 +99,81 @@ We are pleased to announce Explore v25.2.6, which brings significant enhancement **v25.2.6 of Explore ships the following fix:** -- Removal of sub dependency redis -- Update Spring Boot to 3.5.7 +- Removal of sub dependency redis +- Update Spring Boot to 3.5.7 **v25.2.6 of Explore ships the following fix:** -- Removal of sub dependency redis -- Update Spring Boot to 3.5.7 +- Removal of sub dependency redis +- Update Spring Boot to 3.5.7 **v25.2.5 of Explore ships the following fix:** -- Anonymous classes are not considered when resolving the `rdfs:subClassOf` based class hierarchy +- Anonymous classes are not considered when resolving the `rdfs:subClassOf` based class hierarchy **v25.2.4 of Explore ships the following fix:** -- Add missing property for setting lifetime of stale (facet) caches +- Add missing property for setting lifetime of stale (facet) caches **v25.2.3 of Explore ships the following fixes and additions:** -- Protect charts module with the action 'Explore-ChartsCatalog' -- Fix critical Jinjava vulnerability +- Protect charts module with the action 'Explore-ChartsCatalog' +- Fix critical Jinjava vulnerability **v25.2.2 of Explore ships the following fixes and additions:** -- Update the gui-elements version to solve the edges cut problem +- Update the gui-elements version to solve the edges cut problem **v25.2.1 of Explore ships the following fixes and additions:** -- Pagination for inline views of 
Dataset (used for preview in cmem build) -- Widgets for shaped resources without properties to display are shown -- Update/Replace in the explore graph list +- Pagination for inline views of Dataset (used for preview in cmem build) +- Widgets for shaped resources without properties to display are shown +- Update/Replace in the explore graph list **v25.2.0 of Explore adds the following new features:** -- **Enhanced SHACL Shape Management:** - - Create target classes for node shapes on-the-fly during shape definition - - Create properties for property shapes on-the-fly without leaving the shape editor - - Create classes for property shapes on-the-fly for better data modeling - - Support for defining properties with `domainIncludes` and `rangeIncludes` predicates (as defined in either `schema:`,`dcam:` or `gist:`) -- **Query Catalog Enhancements:** - - Graph selection support for Query Catalog, allowing multiple query catalog graphs and editing queries in arbitrary graphs - - Graph selection support for Charts visualization, allowing to store and edit chart visualization in arbitrary graphs -- **GraphDB 11.0.x Support** - Full compatibility with the latest GraphDB version -- **Unified Error Handling** - New RTKAction handler provides consistent error handling across the application +- **Enhanced SHACL Shape Management:** + - Create target classes for node shapes on-the-fly during shape definition + - Create properties for property shapes on-the-fly without leaving the shape editor + - Create classes for property shapes on-the-fly for better data modeling + - Support for defining properties with `domainIncludes` and `rangeIncludes` predicates (as defined in either `schema:`,`dcam:` or `gist:`) +- **Query Catalog Enhancements:** + - Graph selection support for Query Catalog, allowing multiple query catalog graphs and editing queries in arbitrary graphs + - Graph selection support for Charts visualization, allowing to store and edit chart visualization in arbitrary 
graphs +- **GraphDB 11.0.x Support** - Full compatibility with the latest GraphDB version +- **Unified Error Handling** - New RTKAction handler provides consistent error handling across the application **v25.2.0 of Explore introduces the following changes:** -- **Infrastructure Updates:** - - Upgraded to Spring Boot 3.5.x and Apache Jena 5.4 - - Migrated to Java 21 runtime for improved performance -- **Timetracker Module** - Complete rework of the Timetracker and reports module for better performance and usability -- **Knowledge Graph Editor (BKE) Improvements:** - - Updated to React Flow v12 for enhanced graph visualization - - Automatic canvas scrolling when dragging items beyond visible area - - Advanced multi-select functionality on canvas for bulk operations -- **SPARQL Query Endpoints** - Changed to use an explicit list of allowed content types for better security -- **Catalog Query Management** - Update and SELECT queries are now differentiated by `rdf:type` using `shui:SparqlQuery` or `shui:UpdateQuery` +- **Infrastructure Updates:** + - Upgraded to Spring Boot 3.5.x and Apache Jena 5.4 + - Migrated to Java 21 runtime for improved performance +- **Timetracker Module** - Complete rework of the Timetracker and reports module for better performance and usability +- **Knowledge Graph Editor (BKE) Improvements:** + - Updated to React Flow v12 for enhanced graph visualization + - Automatic canvas scrolling when dragging items beyond visible area + - Advanced multi-select functionality on canvas for bulk operations +- **SPARQL Query Endpoints** - Changed to use an explicit list of allowed content types for better security +- **Catalog Query Management** - Update and SELECT queries are now differentiated by `rdf:type` using `shui:SparqlQuery` or `shui:UpdateQuery` **v25.2.0 of Explore ships the following fixes:** -- **Query Catalog:** - - Fixed SPARQL Query editor behavior after "save as" operation - - Improved error handling in Query Catalog API -- **Knowledge 
Graph Editor (BKE):** - - Property shape descriptions now consistently display as tooltips - - Fixed selection issues with expanded nodes - - Fixed greyed-out entries in Initial Search & Explore Navigation Box - - Resolved highlight lag issues for better performance -- **General Fixes:** - - Added warning when "New graph from File" overwrites existing graphs - - Empty node shapes are now properly hidden - - Fixed SHACL Edit Validation Button stability issues - - Fixed SHACL MaxCount property behavior - - Resolved duplicate entries in ResourceManager table - - Fixed broken resource selection for domain and range when graphs contain complex classes - - Enabled empty GSP multipart file uploads +- **Query Catalog:** + - Fixed SPARQL Query editor behavior after "save as" operation + - Improved error handling in Query Catalog API +- **Knowledge Graph Editor (BKE):** + - Property shape descriptions now consistently display as tooltips + - Fixed selection issues with expanded nodes + - Fixed greyed-out entries in Initial Search & Explore Navigation Box + - Resolved highlight lag issues for better performance +- **General Fixes:** + - Added warning when "New graph from File" overwrites existing graphs + - Empty node shapes are now properly hidden + - Fixed SHACL Edit Validation Button stability issues + - Fixed SHACL MaxCount property behavior + - Resolved duplicate entries in ResourceManager table + - Fixed broken resource selection for domain and range when graphs contain complex classes + - Enabled empty GSP multipart file uploads ## eccenca Corporate Memory Control (cmemc) v25.4.0 @@ -186,36 +186,36 @@ We are excited to announce cmemc v25.4.0, which introduces new features, improve **v25.4.0 of cmemc adds the following new features:** -- `query` command group - - can be used with arbitrary query graphs now - - `query list` command - new `--catalog-graph` option to select query catalog - - `query execute` command - new `--catalog-graph` option to select query catalog - - 
`query open` command - new `--catalog-graph` option to select query catalog +- `query` command group + - can be used with arbitrary query graphs now + - `query list` command - new `--catalog-graph` option to select query catalog + - `query execute` command - new `--catalog-graph` option to select query catalog + - `query open` command - new `--catalog-graph` option to select query catalog **v25.3.0 of cmemc adds the following new features:** -- `dataset create` command - - support for binary file datasets - - suggest pdf, png, jpg, jpeg, gif and tiff files as binary file dataset - - shell completion of these files -- `workflow io` command - - support for binary file datasets - - accept `application/octet-stream` as mime type for input and output files - - shell completion of pdf, png, jpg, jpeg, gif and tiff files as input and output - - add support for markdown documents as text datasets +- `dataset create` command + - support for binary file datasets + - suggest pdf, png, jpg, jpeg, gif and tiff files as binary file dataset + - shell completion of these files +- `workflow io` command + - support for binary file datasets + - accept `application/octet-stream` as mime type for input and output files + - shell completion of pdf, png, jpg, jpeg, gif and tiff files as input and output + - add support for markdown documents as text datasets **v25.2.0 of cmemc adds the following new features:** -- `graph imports` command group - - `graph imports create` command - Add graph import to a graph - - `graph imports delete` command - Delete graph import from a graph - - `graph imports list` command - List accessible graph's imports -- `graph export` command - - `--include-import-statements` option to save a `*.imports` file preserving imports of a graph -- `graph import` command - - `--include-import-statements` option to read the `*.imports` files and add the preserved imports to the store -- `graph delete` command - - `--include-import-statements` option to delete imports 
from other graphs to the deleted graph +- `graph imports` command group + - `graph imports create` command - Add graph import to a graph + - `graph imports delete` command - Delete graph import from a graph + - `graph imports list` command - List accessible graph's imports +- `graph export` command + - `--include-import-statements` option to save a `*.imports` file preserving imports of a graph +- `graph import` command + - `--include-import-statements` option to read the `*.imports` files and add the preserved imports to the store +- `graph delete` command + - `--include-import-statements` option to delete imports from other graphs to the deleted graph ## Migration Notes @@ -234,20 +234,20 @@ We are excited to announce cmemc v25.4.0, which introduces new features, improve ### eccenca DataIntegration -- The following plugins have been deprecated and will be removed in a future release: - - Old Python plugins depending on Jython (Python 2.x) - - Spark scripting plugins - - Spark virtual dataset - - Legacy REST operator -- To check if your instance uses any deprecated plugins, use the endpoint: `GET {DataIntegrationURL}/api/core/usages/deprecatedPlugins` +- The following plugins have been deprecated and will be removed in a future release: + - Old Python plugins depending on Jython (Python 2.x) + - Spark scripting plugins + - Spark virtual dataset + - Legacy REST operator +- To check if your instance uses any deprecated plugins, use the endpoint: `GET {DataIntegrationURL}/api/core/usages/deprecatedPlugins` ### eccenca Explore -- **Query Catalog Query Type Changes** - Catalog managed queries no longer persist `shui:queryType`. Update and SELECT queries are now differentiated by `rdf:type`: - - SELECT queries use `shui:SparqlQuery` - - UPDATE queries use `shui:UpdateQuery` +- **Query Catalog Query Type Changes** - Catalog managed queries no longer persist `shui:queryType`. 
Update and SELECT queries are now differentiated by `rdf:type`: + - SELECT queries use `shui:SparqlQuery` + - UPDATE queries use `shui:UpdateQuery` ### eccenca Corporate Memory Control (cmemc) -- With the introduction of the `graph imports` command group, the `graph tree` command is now deprecated. - - use `graph imports tree` instead +- With the introduction of the `graph imports` command group, the `graph tree` command is now deprecated. + - use `graph imports tree` instead diff --git a/docs/release-notes/corporate-memory-25-3/index.md b/docs/release-notes/corporate-memory-25-3/index.md index 6c8d1d73c..0b628cf67 100644 --- a/docs/release-notes/corporate-memory-25-3/index.md +++ b/docs/release-notes/corporate-memory-25-3/index.md @@ -13,19 +13,18 @@ Corporate Memory 25.3 is the third major release in 2025. ![25.3: Explore - Graph Insights](25-3-explore-graph-insights.png "25.3: Explore - Graph Insights"){ class="bordered" } - The highlights of this release are: -- Build: **Mapping Creator** - - Experience a **next-generation, GenAI-driven visual mapping tool** that redefines how you build and manage mapping rules—with unparalleled clarity, speed, and simplicity. +- Build: **Mapping Creator** + - Experience a **next-generation, GenAI-driven visual mapping tool** that redefines how you build and manage mapping rules—with unparalleled clarity, speed, and simplicity. -- Explore: **Companion Chat-Based Data Interaction** - - Introducing an **LLM-powered conversational interface** that lets you interact directly with your data. +- Explore: **Companion Chat-Based Data Interaction** + - Introducing an **LLM-powered conversational interface** that lets you interact directly with your data. Ask questions about your graphs, explore insights using query catalogs or autogenerated queries, and access Corporate Memory resources and modules—all within chat. This feature takes data accessibility and interaction to an entirely new level. 
-- Explore: **Graph Insights** - - Visually explore your graphs contents and relationships in an **aggregated representation** that scales easily to graphs with millions of nodes. +- Explore: **Graph Insights** + - Visually explore your graphs contents and relationships in an **aggregated representation** that scales easily to graphs with millions of nodes. Offering a **compact and flexible overview** aggregating objects and relations and shows details on demand. - diff --git a/docs/testing.md b/docs/testing.md index f477b1661..8827a813b 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -17,9 +17,9 @@ Current issues: ### Example from Geometric mean: 1 -* Weights: [1, 2, 1] -* Input values: [0.0, 0.0, 0.0] -* Returns: `0.0` +- Weights: [1, 2, 1] +- Input values: [0.0, 0.0, 0.0] +- Returns: `0.0` !!! example "Example from Geometric mean: 1" @@ -52,11 +52,11 @@ Current issues: ### Example from Handle missing values: Outputs the default score, if no input score is provided -* Parameters - * *defaultValue*: `1.0` +- Parameters + - *defaultValue*: `1.0` -* Input values: [(none)] -* Returns: `1.0` +- Input values: [(none)] +- Returns: `1.0` !!! example "Outputs the default score, if no input score is provided" @@ -80,11 +80,11 @@ Current issues: ### Example from Date: Returns 0 if both dates are equal -* Input values: - * Source: `[2003-03-01]` - * Target: `[2003-03-01]` +- Input values: + - Source: `[2003-03-01]` + - Target: `[2003-03-01]` -* Returns: → `0.0` +- Returns: → `0.0` !!! example "Returns 0 if both dates are equal" @@ -105,25 +105,25 @@ Current issues: ### Example from Camel Case: A sentence with several words is converted to a single word written in UpperCamelCase -* Parameters - * *isDromedary*: `false` +- Parameters + - *isDromedary*: `false` -* Input values: +- Input values: 1. `[hello world]` -* Returns: +- Returns: → `[HelloWorld]` ### Example from Coalesce: 5 -* Input values: +- Input values: 1. `[]` 2. `[first A, first B]` 3. 
`[second]` -* Returns: +- Returns: → `[first A, first B]` @@ -150,32 +150,32 @@ Current issues: ### Example of Concatenate: 4 -* Parameters - * *glue*: `-` +- Parameters + - *glue*: `-` -* Input values: +- Input values: 1. `[First]` 2. `[Last]` -* Returns: +- Returns: → `[First-Last]` ### Example of Concatenate multiple values: 6 -* Parameters - * *glue*: `\n\t\\` +- Parameters + - *glue*: `\n\t\\` -* Input values: +- Input values: 1. `[a - \b, c]` + \b, c]` -* Returns: +- Returns: → `[a - \b - \c]` + \b + \c]` !!! example "Example 5" diff --git a/docs/tutorials/index.md b/docs/tutorials/index.md index fa07c3eb1..86a02e21b 100644 --- a/docs/tutorials/index.md +++ b/docs/tutorials/index.md @@ -14,4 +14,3 @@ hide: - From 3d01fa3984f7625139017c93b4d6b053c824f35c Mon Sep 17 00:00:00 2001 From: Rene Pietzsch Date: Wed, 17 Dec 2025 11:22:20 +0100 Subject: [PATCH 03/17] switch to rumdl for md linting --- .gitignore | 2 +- .markdownlint.jsonc | 14 +----- Taskfile.yml | 22 +++++++--- poetry.lock | 105 +++++++++----------------------------------- pyproject.toml | 1 + 5 files changed, 39 insertions(+), 105 deletions(-) diff --git a/.gitignore b/.gitignore index e983c9000..0c429e9da 100644 --- a/.gitignore +++ b/.gitignore @@ -8,4 +8,4 @@ tmp .DS_Store .vscode/ .python-version - +dist/ diff --git a/.markdownlint.jsonc b/.markdownlint.jsonc index 947843271..d51f7bef9 100644 --- a/.markdownlint.jsonc +++ b/.markdownlint.jsonc @@ -1,22 +1,15 @@ { - // Example markdownlint JSON(C) configuration with all properties set to their default value - // Default state for all rules "default": true, - // not enforcing line break a specific lenght "MD013": false, "MD007": { "indent": 4 }, - // allow Multiple headings with the same content "MD024": false, - // allow multiple top level headings -> generated files "MD025": false, - // to be consistent with prettier "MD030": { "ul_single": 1, "ul_multi": 1 }, - // allow some html "MD033": { "allowed_elements": [ "figure", @@ -31,15 +24,10 @@ 
"br" ] }, - // not enfore h1 as first content of a file + "MD035": false, "MD041": false, - // allow missing img alt text "MD045": false, - // disabled due to admonition / tabs using indented blocks "MD046": false, - //"MD046": { "style": "fenced" }, - // allow non-descriptive link text, like "here" "MD059": false, - // relax table formatting rules "MD060": false } \ No newline at end of file diff --git a/Taskfile.yml b/Taskfile.yml index cf3e29b87..4325ce3e1 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -181,7 +181,7 @@ tasks: - poetry run mike serve -b {{.PUBLIC_BRANCH}} format:fix: - desc: markdownlit md files and apply possible style fixes + desc: rumdl (md-lint) and apply possible style fixes cmds: - | { @@ -190,12 +190,13 @@ tasks: # no recursion (top-level only) find . -maxdepth 1 -type f -name '*.md' -print0 - } | xargs -0 markdownlint --config .markdownlint.jsonc --fix + } | xargs -0 poetry run rumdl --config .markdownlint.jsonc check --fix - check:markdownlint: - desc: run markdownlint on md files for style issues + check:rumdl: + desc: run rumdl (md-linter) for style issues cmds: - #- mkdir -p ./dist + - mkdir -p ./dist + - rm -f ./dist/md-lint-issues.xml - | { # recurse @@ -203,5 +204,12 @@ tasks: # no recursion (top-level only) find . -maxdepth 1 -type f -name '*.md' -print0 - } | xargs -0 markdownlint --config .markdownlint.jsonc - # --json --output ./dist/markdownlint-issues.json + } | xargs -0 poetry run rumdl --config .markdownlint.jsonc check --output-format junit > ./dist/md-lint-issues.xml || true + - | + { + # recurse + find ./docs -type f -name '*.md' -print0 + + # no recursion (top-level only) + find . -maxdepth 1 -type f -name '*.md' -print0 + } | xargs -0 poetry run rumdl --config .markdownlint.jsonc check diff --git a/poetry.lock b/poetry.lock index 5288358b7..4f4bfaf3c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand. 
+# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand. [[package]] name = "aiohappyeyeballs" @@ -6,7 +6,6 @@ version = "2.6.1" description = "Happy Eyeballs for asyncio" optional = false python-versions = ">=3.9" -groups = ["dev"] files = [ {file = "aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8"}, {file = "aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558"}, @@ -18,7 +17,6 @@ version = "3.12.13" description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.9" -groups = ["dev"] files = [ {file = "aiohttp-3.12.13-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5421af8f22a98f640261ee48aae3a37f0c41371e99412d55eaf2f8a46d5dad29"}, {file = "aiohttp-3.12.13-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0fcda86f6cb318ba36ed8f1396a6a4a3fd8f856f84d426584392083d10da4de0"}, @@ -126,7 +124,6 @@ version = "1.3.2" description = "aiosignal: a list of registered asynchronous callbacks" optional = false python-versions = ">=3.9" -groups = ["dev"] files = [ {file = "aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5"}, {file = "aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54"}, @@ -141,7 +138,6 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -153,7 +149,6 @@ version = "25.3.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.8" -groups 
= ["dev"] files = [ {file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"}, {file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"}, @@ -173,7 +168,6 @@ version = "2.17.0" description = "Internationalization utilities" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2"}, {file = "babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d"}, @@ -188,7 +182,6 @@ version = "5.9" description = "A wrapper around re and regex that adds additional back references." optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "backrefs-5.9-py310-none-any.whl", hash = "sha256:db8e8ba0e9de81fcd635f440deab5ae5f2591b54ac1ebe0550a2ca063488cd9f"}, {file = "backrefs-5.9-py311-none-any.whl", hash = "sha256:6907635edebbe9b2dc3de3a2befff44d74f30a4562adbb8b36f21252ea19c5cf"}, @@ -208,7 +201,6 @@ version = "4.13.4" description = "Screen-scraping library" optional = false python-versions = ">=3.7.0" -groups = ["main"] files = [ {file = "beautifulsoup4-4.13.4-py3-none-any.whl", hash = "sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b"}, {file = "beautifulsoup4-4.13.4.tar.gz", hash = "sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195"}, @@ -231,7 +223,6 @@ version = "2.6" description = "Bash style brace expander." 
optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "bracex-2.6-py3-none-any.whl", hash = "sha256:0b0049264e7340b3ec782b5cb99beb325f36c3782a32e36e876452fd49a09952"}, {file = "bracex-2.6.tar.gz", hash = "sha256:98f1347cd77e22ee8d967a30ad4e310b233f7754dbf31ff3fceb76145ba47dc7"}, @@ -243,7 +234,6 @@ version = "1.7.1" description = "cffi-based cairo bindings for Python" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "cairocffi-1.7.1-py3-none-any.whl", hash = "sha256:9803a0e11f6c962f3b0ae2ec8ba6ae45e957a146a004697a1ac1bbf16b073b3f"}, {file = "cairocffi-1.7.1.tar.gz", hash = "sha256:2e48ee864884ec4a3a34bfa8c9ab9999f688286eb714a15a43ec9d068c36557b"}, @@ -263,7 +253,6 @@ version = "2.8.2" description = "A Simple SVG Converter based on Cairo" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "cairosvg-2.8.2-py3-none-any.whl", hash = "sha256:eab46dad4674f33267a671dce39b64be245911c901c70d65d2b7b0821e852bf5"}, {file = "cairosvg-2.8.2.tar.gz", hash = "sha256:07cbf4e86317b27a92318a4cac2a4bb37a5e9c1b8a27355d06874b22f85bef9f"}, @@ -286,7 +275,6 @@ version = "2025.6.15" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057"}, {file = "certifi-2025.6.15.tar.gz", hash = "sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b"}, @@ -298,7 +286,6 @@ version = "1.17.1" description = "Foreign Function Interface for Python calling C code." 
optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, @@ -378,7 +365,6 @@ version = "3.4.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"}, {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"}, @@ -480,7 +466,6 @@ version = "8.2.1" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.10" -groups = ["main"] files = [ {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, {file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, @@ -495,7 +480,6 @@ version = "25.3.0" description = "API for eccenca Corporate Memory" optional = false python-versions = "<4.0,>=3.9" -groups = ["main"] files = [ {file = "cmem_cmempy-25.3.0-py3-none-any.whl", hash = "sha256:75f9c6900661b5573615b43086897eb4b5fccdb1ec953fa9e20cdaecaeea75c2"}, {file = "cmem_cmempy-25.3.0.tar.gz", hash = "sha256:ccef1410bde7e248d4b89b37366e7c386c8a1558190a07090f0d3c11e3b16ff4"}, @@ -514,7 +498,6 @@ version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["main"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, @@ -526,7 +509,6 @@ version = "0.8.0" description = "CSS selectors for Python ElementTree" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "cssselect2-0.8.0-py3-none-any.whl", hash = "sha256:46fc70ebc41ced7a32cd42d58b1884d72ade23d21e5a4eaaf022401c13f0e76e"}, {file = "cssselect2-0.8.0.tar.gz", hash = "sha256:7674ffb954a3b46162392aee2a3a0aedb2e14ecf99fcc28644900f4e6e3e9d3a"}, @@ -546,7 +528,6 @@ version = "0.7.1" description = "XML bomb protection for Python stdlib modules" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -groups = ["main"] files = [ {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, @@ -558,7 +539,6 @@ version = "1.7.0" description = "A list-like structure which implements collections.abc.MutableSequence" optional = false python-versions = ">=3.9" -groups = ["dev"] files = [ {file = "frozenlist-1.7.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cc4df77d638aa2ed703b878dd093725b72a824c3c546c076e8fdf276f78ee84a"}, {file = "frozenlist-1.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:716a9973a2cc963160394f701964fe25012600f3d311f60c790400b00e568b61"}, @@ -672,7 +652,6 @@ version = "2.1.0" description = "Copy your docs directly to the gh-pages branch." 
optional = false python-versions = "*" -groups = ["main"] files = [ {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"}, @@ -690,7 +669,6 @@ version = "4.0.12" description = "Git Object Database" optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "gitdb-4.0.12-py3-none-any.whl", hash = "sha256:67073e15955400952c6565cc3e707c554a4eea2e428946f7a4c162fab9bd9bcf"}, {file = "gitdb-4.0.12.tar.gz", hash = "sha256:5ef71f855d191a3326fcfbc0d5da835f26b13fbcba60c32c21091c349ffdb571"}, @@ -705,7 +683,6 @@ version = "3.1.44" description = "GitPython is a Python library used to interact with Git repositories" optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "GitPython-3.1.44-py3-none-any.whl", hash = "sha256:9e0e10cda9bed1ee64bc9a6de50e7e38a9c9943241cd7f585f6df3ed28011110"}, {file = "gitpython-3.1.44.tar.gz", hash = "sha256:c87e30b26253bf5418b01b0660f818967f3c503193838337fe5e573331249269"}, @@ -724,7 +701,6 @@ version = "3.1.0" description = "Hjson, a user interface for JSON." 
optional = false python-versions = "*" -groups = ["main"] files = [ {file = "hjson-3.1.0-py3-none-any.whl", hash = "sha256:65713cdcf13214fb554eb8b4ef803419733f4f5e551047c9b711098ab7186b89"}, {file = "hjson-3.1.0.tar.gz", hash = "sha256:55af475a27cf83a7969c808399d7bccdec8fb836a07ddbd574587593b9cdcf75"}, @@ -736,7 +712,6 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" -groups = ["main", "dev"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -751,7 +726,6 @@ version = "8.7.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd"}, {file = "importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000"}, @@ -775,7 +749,6 @@ version = "6.5.2" description = "Read resources from Python packages" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "importlib_resources-6.5.2-py3-none-any.whl", hash = "sha256:789cfdc3ed28c78b67a06acb8126751ced69a3d5f79c095a98298cd8a760ccec"}, {file = "importlib_resources-6.5.2.tar.gz", hash = "sha256:185f87adef5bcc288449d98fb4fba07cea78bc036455dd44c5fc4a2fe78fed2c"}, @@ -795,7 +768,6 @@ version = "3.1.6" description = "A very fast and expressive template engine." 
optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, @@ -813,7 +785,6 @@ version = "1.4.0" description = "Check links for Markdown-based site" optional = false python-versions = ">=3.7" -groups = ["dev"] files = [ {file = "linkcheckmd-1.4.0.tar.gz", hash = "sha256:3a539c9a4e11697fc7fcc269d379accf93c8cccbf971f3cea0bae40912d9f609"}, ] @@ -832,7 +803,6 @@ version = "3.8.2" description = "Python implementation of John Gruber's Markdown." optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "markdown-3.8.2-py3-none-any.whl", hash = "sha256:5c83764dbd4e00bdd94d85a19b8d55ccca20fe35b2e678a1422b380324dd5f24"}, {file = "markdown-3.8.2.tar.gz", hash = "sha256:247b9a70dd12e27f67431ce62523e675b866d254f900c4fe75ce3dda62237c45"}, @@ -848,7 +818,6 @@ version = "3.0.2" description = "Safely add untrusted strings to HTML/XML markup." optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, @@ -919,7 +888,6 @@ version = "1.3.4" description = "A deep merge function for 🐍." 
optional = false python-versions = ">=3.6" -groups = ["main"] files = [ {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, @@ -931,7 +899,6 @@ version = "2.1.3" description = "Manage multiple versions of your MkDocs-powered documentation" optional = false python-versions = "*" -groups = ["main"] files = [ {file = "mike-2.1.3-py3-none-any.whl", hash = "sha256:d90c64077e84f06272437b464735130d380703a76a5738b152932884c60c062a"}, {file = "mike-2.1.3.tar.gz", hash = "sha256:abd79b8ea483fb0275b7972825d3082e5ae67a41820f8d8a0dc7a3f49944e810"}, @@ -957,7 +924,6 @@ version = "1.6.1" description = "Project documentation with Markdown." optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "mkdocs-1.6.1-py3-none-any.whl", hash = "sha256:db91759624d1647f3f34aa0c3f327dd2601beae39a366d6e064c03468d35c20e"}, {file = "mkdocs-1.6.1.tar.gz", hash = "sha256:7b432f01d928c084353ab39c57282f29f92136665bdd6abf7c1ec8d822ef86f2"}, @@ -988,7 +954,6 @@ version = "0.7.1" description = "An MkDocs plugin" optional = false python-versions = ">=3.4" -groups = ["main"] files = [ {file = "mkdocs-autolinks-plugin-0.7.1.tar.gz", hash = "sha256:445ddb9b417b7795856c30801bb430773186c1daf210bdeecf8305f55a47d151"}, {file = "mkdocs_autolinks_plugin-0.7.1-py3-none-any.whl", hash = "sha256:5c6c17f6649b68e79a9ef0b2648d59f3072e18002b90ee1586a64c505f11ab12"}, @@ -1003,7 +968,6 @@ version = "2.10.1" description = "An MkDocs plugin that simplifies configuring page titles and their order" optional = false python-versions = ">=3.8.1" -groups = ["main"] files = [ {file = "mkdocs_awesome_pages_plugin-2.10.1-py3-none-any.whl", hash = "sha256:c6939dbea37383fc3cf8c0a4e892144ec3d2f8a585e16fdc966b34e7c97042a7"}, {file = "mkdocs_awesome_pages_plugin-2.10.1.tar.gz", hash = 
"sha256:cda2cb88c937ada81a4785225f20ef77ce532762f4500120b67a1433c1cdbb2f"}, @@ -1020,7 +984,6 @@ version = "0.2.0" description = "MkDocs extension that lists all dependencies according to a mkdocs.yml file" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "mkdocs_get_deps-0.2.0-py3-none-any.whl", hash = "sha256:2bf11d0b133e77a0dd036abeeb06dec8775e46efa526dc70667d8863eefc6134"}, {file = "mkdocs_get_deps-0.2.0.tar.gz", hash = "sha256:162b3d129c7fad9b19abfdcb9c1458a651628e4b1dea628ac68790fb3061c60c"}, @@ -1037,7 +1000,6 @@ version = "1.4.7" description = "Mkdocs plugin that enables displaying the localized date of the last git modification of a markdown file." optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "mkdocs_git_revision_date_localized_plugin-1.4.7-py3-none-any.whl", hash = "sha256:056c0a90242409148f1dc94d5c9d2c25b5b8ddd8de45489fa38f7fa7ccad2bc4"}, {file = "mkdocs_git_revision_date_localized_plugin-1.4.7.tar.gz", hash = "sha256:10a49eff1e1c3cb766e054b9d8360c904ce4fe8c33ac3f6cc083ac6459c91953"}, @@ -1055,7 +1017,6 @@ version = "0.4.0" description = "MkDocs plugin supports image lightbox with GLightbox." 
optional = false python-versions = "*" -groups = ["main"] files = [ {file = "mkdocs-glightbox-0.4.0.tar.gz", hash = "sha256:392b34207bf95991071a16d5f8916d1d2f2cd5d5bb59ae2997485ccd778c70d9"}, {file = "mkdocs_glightbox-0.4.0-py3-none-any.whl", hash = "sha256:e0107beee75d3eb7380ac06ea2d6eac94c999eaa49f8c3cbab0e7be2ac006ccf"}, @@ -1067,7 +1028,6 @@ version = "1.3.7" description = "Unleash the power of MkDocs with macros and variables" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "mkdocs_macros_plugin-1.3.7-py3-none-any.whl", hash = "sha256:02432033a5b77fb247d6ec7924e72fc4ceec264165b1644ab8d0dc159c22ce59"}, {file = "mkdocs_macros_plugin-1.3.7.tar.gz", hash = "sha256:17c7fd1a49b94defcdb502fd453d17a1e730f8836523379d21292eb2be4cb523"}, @@ -1093,7 +1053,6 @@ version = "9.6.14+insiders.4.53.16" description = "Documentation that simply works" optional = false python-versions = ">=3.8" -groups = ["main"] files = [] develop = false @@ -1118,7 +1077,7 @@ recommended = ["mkdocs-minify-plugin (>=0.7,<1.0)", "mkdocs-redirects (>=1.2,<2. [package.source] type = "git" -url = "ssh://git@github.com/eccenca/mkdocs-material-insiders.git" +url = "git@github.com:eccenca/mkdocs-material-insiders.git" reference = "9.6.14-insiders-4.53.16" resolved_reference = "ce2cca8c5240ae520e09a67954de09949bd04efe" @@ -1128,7 +1087,6 @@ version = "1.3.1" description = "Extension pack for Python Markdown and MkDocs Material." 
optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "mkdocs_material_extensions-1.3.1-py3-none-any.whl", hash = "sha256:adff8b62700b25cb77b53358dad940f3ef973dd6db797907c49e3c2ef3ab4e31"}, {file = "mkdocs_material_extensions-1.3.1.tar.gz", hash = "sha256:10c9511cea88f568257f960358a467d12b970e1f7b2c0e5fb2bb48cab1928443"}, @@ -1140,7 +1098,6 @@ version = "1.2.2" description = "A MkDocs plugin for dynamic page redirects to prevent broken links" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "mkdocs_redirects-1.2.2-py3-none-any.whl", hash = "sha256:7dbfa5647b79a3589da4401403d69494bd1f4ad03b9c15136720367e1f340ed5"}, {file = "mkdocs_redirects-1.2.2.tar.gz", hash = "sha256:3094981b42ffab29313c2c1b8ac3969861109f58b2dd58c45fc81cd44bfa0095"}, @@ -1155,7 +1112,6 @@ version = "0.7.1" description = "A MkDocs plugin supports for add Swagger UI in page." optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "mkdocs_swagger_ui_tag-0.7.1-py3-none-any.whl", hash = "sha256:e4a1019c96ef333ec4dab0ef7d80068a345c7526a87fe8718f18852ee5ad34a5"}, {file = "mkdocs_swagger_ui_tag-0.7.1.tar.gz", hash = "sha256:aed3c5f15297d74241f38cfba4763a5789bf10a410e005014763c66e79576b65"}, @@ -1170,7 +1126,6 @@ version = "6.6.0" description = "multidict implementation" optional = false python-versions = ">=3.9" -groups = ["dev"] files = [ {file = "multidict-6.6.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d7913e6d0953b6d65c74290da65bc33d60d32a48bbe0bf2398ea1c5a2626e0b2"}, {file = "multidict-6.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8552e89a546408d3f78f1efd1c48e46077b68e59b6d5607498dd0a44df60b87c"}, @@ -1278,7 +1233,6 @@ version = "8.4.0" description = "Simple yet flexible natural sorting in Python." 
optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "natsort-8.4.0-py3-none-any.whl", hash = "sha256:4732914fb471f56b5cce04d7bae6f164a592c7712e1c85f9ef585e197299521c"}, {file = "natsort-8.4.0.tar.gz", hash = "sha256:45312c4a0e5507593da193dedd04abb1469253b601ecaf63445ad80f0a1ea581"}, @@ -1294,7 +1248,6 @@ version = "25.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, @@ -1306,7 +1259,6 @@ version = "0.5.7" description = "Divides large result sets into pages for easier browsing" optional = false python-versions = "*" -groups = ["main"] files = [ {file = "paginate-0.5.7-py2.py3-none-any.whl", hash = "sha256:b885e2af73abcf01d9559fd5216b57ef722f8c42affbb63942377668e35c7591"}, {file = "paginate-0.5.7.tar.gz", hash = "sha256:22bd083ab41e1a8b4f3690544afb2c60c25e5c9a63a30fa2f483f6c60c8e5945"}, @@ -1322,7 +1274,6 @@ version = "0.12.1" description = "Utility library for gitignore style pattern matching of file paths." 
optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, @@ -1334,7 +1285,6 @@ version = "11.2.1" description = "Python Imaging Library (Fork)" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "pillow-11.2.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:d57a75d53922fc20c165016a20d9c44f73305e67c351bbc60d1adaf662e74047"}, {file = "pillow-11.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:127bf6ac4a5b58b3d32fc8289656f77f80567d65660bc46f72c0d77e6600cc95"}, @@ -1434,7 +1384,6 @@ version = "4.3.8" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"}, {file = "platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc"}, @@ -1451,7 +1400,6 @@ version = "0.3.2" description = "Accelerated property cache" optional = false python-versions = ">=3.9" -groups = ["dev"] files = [ {file = "propcache-0.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:22d9962a358aedbb7a2e36187ff273adeaab9743373a272976d2e348d08c7770"}, {file = "propcache-0.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0d0fda578d1dc3f77b6b5a5dce3b9ad69a8250a891760a548df850a5e8da87f3"}, @@ -1559,7 +1507,6 @@ version = "2.22" description = "C parser in Python" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, {file = "pycparser-2.22.tar.gz", hash = 
"sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, @@ -1571,7 +1518,6 @@ version = "2.11.7" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b"}, {file = "pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db"}, @@ -1593,7 +1539,6 @@ version = "2.33.2" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, @@ -1705,7 +1650,6 @@ version = "2.19.2" description = "Pygments is a syntax highlighting package written in Python." optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, @@ -1720,7 +1664,6 @@ version = "10.16" description = "Extension pack for Python Markdown." 
optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "pymdown_extensions-10.16-py3-none-any.whl", hash = "sha256:f5dd064a4db588cb2d95229fc4ee63a1b16cc8b4d0e6145c0899ed8723da1df2"}, {file = "pymdown_extensions-10.16.tar.gz", hash = "sha256:71dac4fca63fabeffd3eb9038b756161a33ec6e8d230853d3cecf562155ab3de"}, @@ -1739,7 +1682,6 @@ version = "3.2.3" description = "pyparsing module - Classes and methods to define and execute parsing grammars" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "pyparsing-3.2.3-py3-none-any.whl", hash = "sha256:a749938e02d6fd0b59b356ca504a24982314bb090c383e3cf201c95ef7e2bfcf"}, {file = "pyparsing-3.2.3.tar.gz", hash = "sha256:b9c13f1ab8b3b542f72e28f634bad4de758ab3ce4546e4301970ad6fa77c38be"}, @@ -1754,7 +1696,6 @@ version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -1769,7 +1710,6 @@ version = "2025.2" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" -groups = ["main"] files = [ {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"}, {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, @@ -1781,7 +1721,6 @@ version = "6.0.2" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = 
"PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -1844,7 +1783,6 @@ version = "1.1" description = "A custom YAML tag for referencing environment variables in YAML files." optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "pyyaml_env_tag-1.1-py3-none-any.whl", hash = "sha256:17109e1a528561e32f026364712fee1264bc2ea6715120891174ed1b980d2e04"}, {file = "pyyaml_env_tag-1.1.tar.gz", hash = "sha256:2eb38b75a2d21ee0475d6d97ec19c63287a7e140231e4214969d0eac923cd7ff"}, @@ -1859,7 +1797,6 @@ version = "7.1.4" description = "RDFLib is a Python library for working with RDF, a simple yet powerful language for representing information." optional = false python-versions = "<4.0.0,>=3.8.1" -groups = ["main"] files = [ {file = "rdflib-7.1.4-py3-none-any.whl", hash = "sha256:72f4adb1990fa5241abd22ddaf36d7cafa5d91d9ff2ba13f3086d339b213d997"}, {file = "rdflib-7.1.4.tar.gz", hash = "sha256:fed46e24f26a788e2ab8e445f7077f00edcf95abb73bcef4b86cefa8b62dd174"}, @@ -1881,7 +1818,6 @@ version = "2.32.4" description = "Python HTTP for Humans." 
optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"}, {file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"}, @@ -1903,7 +1839,6 @@ version = "1.0.0" description = "A utility belt for advanced users of python-requests" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -groups = ["main"] files = [ {file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"}, {file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"}, @@ -1912,13 +1847,29 @@ files = [ [package.dependencies] requests = ">=2.0.1,<3.0.0" +[[package]] +name = "rumdl" +version = "0.0.194" +description = "A fast Markdown linter written in Rust" +optional = false +python-versions = ">=3.7" +files = [ + {file = "rumdl-0.0.194-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:dbfdf073349795d06ef9b9b1b506495ea848a0074c1645a10056fb4ec633eaea"}, + {file = "rumdl-0.0.194-py3-none-macosx_11_0_arm64.whl", hash = "sha256:b07b2c1e8ed5b2dcba5772259d3bee0a47711341d11055e289abcb9ca4348c0b"}, + {file = "rumdl-0.0.194-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:111af9c12f4a8d7a0a69152557962fb4d0467d1ed2444588b70ad299efc5a9ad"}, + {file = "rumdl-0.0.194-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31bec50ff51ae80909de7c8ceb2355a19c7f9f046f29624337e6a21dddc06afe"}, + {file = "rumdl-0.0.194-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c14f2d0468b4b41e0900d0e269deafa69d0cea0cda7eace886987e0f21cdd53f"}, + {file = "rumdl-0.0.194-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:459f97f8527cbc8620cfbe54d239cb480622f205f26e18fb6fa00ac204d1a93b"}, + {file = 
"rumdl-0.0.194-py3-none-win_amd64.whl", hash = "sha256:5b18d0d2f753030ae23374b1419b4872ca583257d12c03367c3000d0d1a5ad6f"}, + {file = "rumdl-0.0.194.tar.gz", hash = "sha256:be57d294e95c3dbf112cb408204bb0fd9fc5fe6ffb7df4b93e4579b565fcc51e"}, +] + [[package]] name = "six" version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main"] files = [ {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, @@ -1930,7 +1881,6 @@ version = "5.0.2" description = "A pure Python implementation of a sliding window memory map manager" optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "smmap-5.0.2-py3-none-any.whl", hash = "sha256:b30115f0def7d7531d22a0fb6502488d879e75b260a9db4d0819cfb25403af5e"}, {file = "smmap-5.0.2.tar.gz", hash = "sha256:26ea65a03958fa0c8a1c7e8c7a58fdc77221b8910f6be2131affade476898ad5"}, @@ -1942,7 +1892,6 @@ version = "2.7" description = "A modern CSS selector implementation for Beautiful Soup." 
optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4"}, {file = "soupsieve-2.7.tar.gz", hash = "sha256:ad282f9b6926286d2ead4750552c8a6142bc4c783fd66b0293547c8fe6ae126a"}, @@ -1954,7 +1903,6 @@ version = "0.5.3" description = "file: README.md" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "super_collections-0.5.3-py3-none-any.whl", hash = "sha256:907d35b25dc4070910e8254bf2f5c928348af1cf8a1f1e8259e06c666e902cff"}, {file = "super_collections-0.5.3.tar.gz", hash = "sha256:94c1ec96c0a0d5e8e7d389ed8cde6882ac246940507c5e6b86e91945c2968d46"}, @@ -1972,7 +1920,6 @@ version = "3.1.0" description = "ANSI color formatting for output in terminal" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "termcolor-3.1.0-py3-none-any.whl", hash = "sha256:591dd26b5c2ce03b9e43f391264626557873ce1d379019786f99b0c2bee140aa"}, {file = "termcolor-3.1.0.tar.gz", hash = "sha256:6a6dd7fbee581909eeec6a756cff1d7f7c376063b14e4a298dc4980309e55970"}, @@ -1987,7 +1934,6 @@ version = "1.4.0" description = "A tiny CSS parser" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "tinycss2-1.4.0-py3-none-any.whl", hash = "sha256:3a49cf47b7675da0b15d0c6e1df8df4ebd96e9394bb905a5775adb0d884c5289"}, {file = "tinycss2-1.4.0.tar.gz", hash = "sha256:10c0972f6fc0fbee87c3edb76549357415e94548c1ae10ebccdea16fb404a9b7"}, @@ -2006,7 +1952,6 @@ version = "4.14.0" description = "Backported and Experimental Type Hints for Python 3.9+" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af"}, {file = "typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4"}, @@ -2018,7 +1963,6 @@ version = 
"0.4.1" description = "Runtime typing introspection tools" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51"}, {file = "typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28"}, @@ -2033,7 +1977,6 @@ version = "2.5.0" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, @@ -2051,7 +1994,6 @@ version = "0.1.0" description = "Flexible version handling" optional = false python-versions = "*" -groups = ["main"] files = [ {file = "verspec-0.1.0-py3-none-any.whl", hash = "sha256:741877d5633cc9464c45a469ae2a31e801e6dbbaa85b9675d481cda100f11c31"}, {file = "verspec-0.1.0.tar.gz", hash = "sha256:c4504ca697b2056cdb4bfa7121461f5a0e81809255b41c03dda4ba823637c01e"}, @@ -2066,7 +2008,6 @@ version = "6.0.0" description = "Filesystem events monitoring" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26"}, {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112"}, @@ -2109,7 +2050,6 @@ version = "10.1" description = "Wildcard/glob file name matcher." 
optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "wcmatch-10.1-py3-none-any.whl", hash = "sha256:5848ace7dbb0476e5e55ab63c6bbd529745089343427caa5537f230cc01beb8a"}, {file = "wcmatch-10.1.tar.gz", hash = "sha256:f11f94208c8c8484a16f4f48638a85d771d9513f4ab3f37595978801cb9465af"}, @@ -2124,7 +2064,6 @@ version = "0.5.1" description = "Character encoding aliases for legacy web content" optional = false python-versions = "*" -groups = ["main"] files = [ {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, @@ -2136,7 +2075,6 @@ version = "1.20.1" description = "Yet another URL library" optional = false python-versions = ">=3.9" -groups = ["dev"] files = [ {file = "yarl-1.20.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6032e6da6abd41e4acda34d75a816012717000fa6839f37124a47fcefc49bec4"}, {file = "yarl-1.20.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2c7b34d804b8cf9b214f05015c4fee2ebe7ed05cf581e7192c06555c71f4446a"}, @@ -2255,7 +2193,6 @@ version = "3.23.0" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e"}, {file = "zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166"}, @@ -2270,6 +2207,6 @@ test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more_it type = ["pytest-mypy"] [metadata] -lock-version = "2.1" +lock-version = "2.0" python-versions = "^3.11" -content-hash = "a0e36e00724c0eadf029aa4d5c5358bf2942ef4cc6eb162dcb36e95a8aa7907d" +content-hash = "8dca9021c750c8ab63ab240ae05ccde92fb1998bcd1659ce19074d31610aefc9" diff --git 
a/pyproject.toml b/pyproject.toml index 644097b6b..ff86d0d68 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,6 +34,7 @@ jinja2 = "^3.1.6" [tool.poetry.group.dev.dependencies] linkcheckmd = "^1.4.0" +rumdl = "^0.0.194" [build-system] requires = ["poetry-core"] From 2bc191870e8913560a7b9746568921a4a861448f Mon Sep 17 00:00:00 2001 From: Rene Pietzsch Date: Wed, 17 Dec 2025 11:24:47 +0100 Subject: [PATCH 04/17] fix links and structure --- .../define-the-interfaces/index.md | 4 +- .../index.md | 7 +-- .../index.md | 62 +++++++++---------- 3 files changed, 36 insertions(+), 37 deletions(-) diff --git a/docs/build/tutorial-how-to-link-ids-to-osint/define-the-interfaces/index.md b/docs/build/tutorial-how-to-link-ids-to-osint/define-the-interfaces/index.md index a0f6415c6..1aaf6f1fb 100644 --- a/docs/build/tutorial-how-to-link-ids-to-osint/define-the-interfaces/index.md +++ b/docs/build/tutorial-how-to-link-ids-to-osint/define-the-interfaces/index.md @@ -31,7 +31,7 @@ The first dashboard to do for our use cases is the list of IoCs with classic SPL Here, the figure 3 is nice but before this first schema during the project, there are a lot of shemas and all were minimalist and ugly often only on a whiteboard. This type schema before the technical feasibility is only to validate the objective with the analysts before starting the development. During the technical feasibility, we can decrease/increase step-by-step your objectives to show finally a first result in figure 4 in a real dashboard.
-![Figure 4. First interface with only SPARQL queries in SPLUNK static tables.](./../../link-IDS-event-to-KG/demo_ld_without_html.png) +![Figure 4. First interface with only SPARQL queries in SPLUNK static tables.](./../link-IDS-event-to-KG/demo_ld_without_html.png)
Figure 4. First interface with only SPARQL queries in SPLUNK static tables.
@@ -182,6 +182,6 @@ With the interfaces, the available data and their links in head, the analyst can Tutorial: [how to link Intrusion Detection Systems (IDS) to Open-Source INTelligence (OSINT)](../index.md) -Next chapter: [Build a Knowledge Graph from MITRE ATT&CK® datasets](./../../lift-data-from-STIX-2.1-data-of-mitre-attack/index.md) +Next chapter: [Build a Knowledge Graph from MITRE ATT&CK® datasets](./../lift-data-from-STIX-2.1-data-of-mitre-attack/index.md) Previous chapter: [Define the need, the expected result and the use cases](../define-the-need/index.md) diff --git a/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-STIX-2.1-data-of-mitre-attack/index.md b/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-STIX-2.1-data-of-mitre-attack/index.md index 365a1b297..94411032a 100644 --- a/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-STIX-2.1-data-of-mitre-attack/index.md +++ b/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-STIX-2.1-data-of-mitre-attack/index.md @@ -10,8 +10,7 @@ The MITRE ATT&CK datasets in STIX 2.1 JSON collections are here: * [mobile-attack.json](https://github.com/mitre-attack/attack-stix-data/blob/master/mobile-attack/mobile-attack.json){target=_blank} * [ics-attack.json](https://github.com/mitre-attack/attack-stix-data/blob/master/ics-attack/ics-attack.json){target=_blank} -[Structured Threat Information Expression (STIX™)]( -https://oasis-open.github.io/cti-documentation/stix/intro.html) is a language and serialization format used to exchange cyber threat intelligence (CTI). +[Structured Threat Information Expression (STIX™)](https://oasis-open.github.io/cti-documentation/stix/intro.html) is a language and serialization format used to exchange cyber threat intelligence (CTI). 
The "ontology" of MITRE ATT&CK with STIX is here: [https://github.com/mitre/cti/blob/master/USAGE.md](https://github.com/mitre/cti/blob/master/USAGE.md) @@ -747,10 +746,10 @@ After this tutorial, you want probably to navigate in your new knowledge graph b 4. Split the workflow in two workflows: - * "Transform all STIX data to RDF" to calculate the inferences after RDF triples + * "Transform all STIX data to RDF" to calculate the inferences after RDF triples ![](23-1-ex-workflow-STIX.png) - * "Assemble the global knowledge graph", it will import all the graphs of projects + * "Assemble the global knowledge graph", it will import all the graphs of projects ![](23-1-ex-workflow-gen.png) 5. Create a new workflow "MITRE ATT&CK® workflow" where you will insert the other workflows, like that: diff --git a/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-YAML-data-of-hayabusa-sigma/index.md b/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-YAML-data-of-hayabusa-sigma/index.md index e0acf66a4..ec1c1fa19 100644 --- a/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-YAML-data-of-hayabusa-sigma/index.md +++ b/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-YAML-data-of-hayabusa-sigma/index.md @@ -111,61 +111,61 @@ This new transformer are building the following RDF model for your use case: 5. Create the transformer for "SIGMA Hayabusa rule" to build this RDF model. 
-Rule object: + Rule object: -- type: `ctis:Rule` + - type: `ctis:Rule` -- IRI: concatenation of "" with the result of this regular expression `^.*?([^\/]*)$` on the rule path + - IRI: concatenation of "" with the result of this regular expression `^.*?([^\/]*)$` on the rule path -![](23-1-iri-rule.png) + ![](23-1-iri-rule.png) -- property `ctis:filename` with the result of this regular expression `^.*?([^\/]*)$` on the value path `rulePath` -- property `rdfs:label` with the value path `title` -- property `rdfs:comment` with the value path `description` -- property `rdfs:seeAlso` with the value path `references` -- property `ctis:mitreAttackTechniqueId` is building with this formula with the value path `tags` - - Filter by regex: `^attack\.t\d+$` - - Regex replace `attack\.t` by `T` + - property `ctis:filename` with the result of this regular expression `^.*?([^\/]*)$` on the value path `rulePath` + - property `rdfs:label` with the value path `title` + - property `rdfs:comment` with the value path `description` + - property `rdfs:seeAlso` with the value path `references` + - property `ctis:mitreAttackTechniqueId` is building with this formula with the value path `tags` + - Filter by regex: `^attack\.t\d+$` + - Regex replace `attack\.t` by `T` -![](23-1-formula-mitreid.png) + ![](23-1-formula-mitreid.png) -- property `rdfs:isDefinedBy` on the value path `rulePath` is building with this formula to link the rules to their Web addresses. - - Add two "Regex replace" - - replace `\./hayabusa-rules/` by `https://github.com/Yamato-Security/hayabusa-rules/blob/main/` - - replace `\./sigma/` by `https://github.com/SigmaHQ/sigma/blob/master/` + - property `rdfs:isDefinedBy` on the value path `rulePath` is building with this formula to link the rules to their Web addresses. 
+ - Add two "Regex replace" + - replace `\./hayabusa-rules/` by `https://github.com/Yamato-Security/hayabusa-rules/blob/main/` + - replace `\./sigma/` by `https://github.com/SigmaHQ/sigma/blob/master/` -![](23-1-rules-isdefinedby.png) + ![](23-1-rules-isdefinedby.png) -So the rulepath `./sigma/rules/windows/process_creation/proc_creation_win_bcdedit_boot_conf_tamper.yml` becomes the link `https://github.com/SigmaHQ/sigma/blob/master/rules/windows/process_creation/proc_creation_win_bcdedit_boot_conf_tamper.yml` and `./hayabusa-rules/hayabusa/sysmon/Sysmon_15_Info_ADS-Created.yml`becomes `https://github.com/Yamato-Security/hayabusa-rules/blob/main/hayabusa/sysmon/Sysmon_11_Med_FileCreated_RuleAlert.yml` + So the rulepath `./sigma/rules/windows/process_creation/proc_creation_win_bcdedit_boot_conf_tamper.yml` becomes the link `https://github.com/SigmaHQ/sigma/blob/master/rules/windows/process_creation/proc_creation_win_bcdedit_boot_conf_tamper.yml` and `./hayabusa-rules/hayabusa/sysmon/Sysmon_15_Info_ADS-Created.yml`becomes `https://github.com/Yamato-Security/hayabusa-rules/blob/main/hayabusa/sysmon/Sysmon_11_Med_FileCreated_RuleAlert.yml` -!!! Tips + !!! Tips - To test your transformer, you can use the tab "Transform execution". Here, the knowledge graph will not be cleared after each workflow or execution to test your transformer because the option "clear graph before workflow" is disabled. However during the steps to build this transformer, you can enable tempory this option to see and test the final transformer. - You need only to disable this option when your transformer is finished. + To test your transformer, you can use the tab "Transform execution". Here, the knowledge graph will not be cleared after each workflow or execution to test your transformer because the option "clear graph before workflow" is disabled. However during the steps to build this transformer, you can enable tempory this option to see and test the final transformer. 
+ You need only to disable this option when your transformer is finished. -!!! Success + !!! Success - Your example of rule exists now in your knowledge graph: - ![](23-1-success-extract-rule2.png) - ![](23-1-success-extract-rule.png) + Your example of rule exists now in your knowledge graph: + ![](23-1-success-extract-rule2.png) + ![](23-1-success-extract-rule.png) 6. Make the workflow "Import rules" with one input ![](23-1-success-workflow.png) -And don't forget to allow the replacement of JSON dataset because it allows to replace this specific JSON by all other rules during the execution of this worflow. + And don't forget to allow the replacement of JSON dataset because it allows to replace this specific JSON by all other rules during the execution of this worflow. -![](23-1-workflow-allow-replacement.png) + ![](23-1-workflow-allow-replacement.png) -![](23-1-add-worflow.gif) + ![](23-1-add-worflow.gif) -1. Copy the workflow ID +7. Copy the workflow ID ![](23-1-id-worflow.gif) -!!! Success + !!! 
Success - In this example the ID of workflow is `RulesHayabusaSigma_671e1f43d94bbc36:Importrules_6ccbc14b656c75c9` + In this example the ID of workflow is `RulesHayabusaSigma_671e1f43d94bbc36:Importrules_6ccbc14b656c75c9` ## Apply the worflow to all files From 8bc6b669d2de98d7e4dfbb7f4ca4d317d4c777b8 Mon Sep 17 00:00:00 2001 From: Rene Pietzsch Date: Wed, 17 Dec 2025 11:31:11 +0100 Subject: [PATCH 05/17] apply rumdl --fix (-es) --- .../command-reference/admin/acl/index.md | 2 + .../command-reference/admin/client/index.md | 2 + .../command-reference/admin/index.md | 2 + .../command-reference/admin/metrics/index.md | 2 + .../admin/migration/index.md | 2 + .../command-reference/admin/store/index.md | 2 + .../command-reference/admin/user/index.md | 2 + .../admin/workspace/index.md | 2 + .../admin/workspace/python/index.md | 2 + .../command-reference/config/index.md | 2 + .../command-reference/dataset/index.md | 2 + .../dataset/resource/index.md | 2 + .../command-reference/graph/imports/index.md | 2 + .../command-reference/graph/index.md | 2 + .../command-reference/graph/insights/index.md | 2 + .../graph/validation/index.md | 2 + .../command-reference/index.md | 2 + .../command-reference/project/file/index.md | 2 + .../command-reference/project/index.md | 2 + .../project/variable/index.md | 2 + .../command-reference/query/index.md | 2 + .../vocabulary/cache/index.md | 2 + .../command-reference/vocabulary/index.md | 2 + .../command-reference/workflow/index.md | 2 + .../workflow/scheduler/index.md | 2 + .../index.md | 4 +- .../extracting-data-from-a-web-api/index.md | 2 +- docs/build/integrations/index.md | 7 +++ .../index.md | 6 +-- docs/build/reference/aggregator/average.md | 4 +- .../reference/aggregator/firstNonEmpty.md | 4 +- .../reference/aggregator/geometricMean.md | 4 +- .../aggregator/handleMissingValues.md | 4 +- docs/build/reference/aggregator/index.md | 2 + docs/build/reference/aggregator/max.md | 4 +- docs/build/reference/aggregator/min.md | 4 +- 
docs/build/reference/aggregator/negate.md | 4 +- .../reference/aggregator/quadraticMean.md | 4 +- docs/build/reference/aggregator/scale.md | 4 +- .../reference/customtask/CancelWorkflow.md | 4 +- .../reference/customtask/ConcatenateToFile.md | 4 +- .../customtask/CustomSQLExecution.md | 4 +- docs/build/reference/customtask/DistinctBy.md | 4 +- .../customtask/JsonParserOperator.md | 4 +- docs/build/reference/customtask/Merge.md | 4 +- .../reference/customtask/MultiTableMerge.md | 4 +- docs/build/reference/customtask/Pivot.md | 4 +- docs/build/reference/customtask/Scheduler.md | 4 +- .../reference/customtask/SearchAddresses.md | 12 +++-- docs/build/reference/customtask/SendEMail.md | 4 +- .../reference/customtask/SparkFunction.md | 4 +- docs/build/reference/customtask/Template.md | 4 +- docs/build/reference/customtask/Unpivot.md | 4 +- .../reference/customtask/XmlParserOperator.md | 4 +- .../reference/customtask/addProjectFiles.md | 4 +- .../customtask/cmem-plugin-jq-workflow.md | 4 +- .../cmem_plugin_auth-workflow-auth-OAuth2.md | 4 +- .../cmem_plugin_graph_insights-Update.md | 4 +- ..._graphql-workflow-graphql-GraphQLPlugin.md | 4 +- ...in_irdi-workflow-irdi_plugin-IrdiPlugin.md | 4 +- .../customtask/cmem_plugin_jira-JqlQuery.md | 4 +- .../cmem_plugin_kafka-ReceiveMessages.md | 4 +- .../cmem_plugin_kafka-SendMessages.md | 4 +- .../cmem_plugin_kubernetes-Execute.md | 5 +- .../cmem_plugin_llm-CreateEmbeddings.md | 4 +- .../cmem_plugin_llm-ExecuteInstructions.md | 4 +- .../cmem_plugin_loopwf-task-StartWorkflow.md | 4 +- .../customtask/cmem_plugin_mattermost.md | 4 +- .../cmem_plugin_nextcloud-Download.md | 8 +-- .../customtask/cmem_plugin_nextcloud-List.md | 6 ++- .../cmem_plugin_nextcloud-Upload.md | 8 +-- .../cmem_plugin_office365-Download.md | 6 ++- .../customtask/cmem_plugin_office365-List.md | 6 ++- .../cmem_plugin_office365-Upload.md | 6 ++- ...cmem_plugin_parameters-ParametersPlugin.md | 6 ++- ...ugin_pdf_extract-pdf_extract-PdfExtract.md | 8 +-- 
.../customtask/cmem_plugin_pgvector-Search.md | 4 +- .../customtask/cmem_plugin_pgvector-Store.md | 4 +- .../cmem_plugin_project_resources-List.md | 6 ++- ...ugin_project_resources-UploadLocalFiles.md | 4 +- .../cmem_plugin_salesforce-SoqlQuery.md | 8 +-- ...force-workflow-operations-SobjectCreate.md | 6 ++- ...lugin_shapes-plugin_shapes-ShapesPlugin.md | 6 ++- ...itfile-plugin_splitfile-SplitFilePlugin.md | 4 +- .../customtask/cmem_plugin_ssh-Download.md | 8 +-- .../customtask/cmem_plugin_ssh-Execute.md | 8 +-- .../customtask/cmem_plugin_ssh-List.md | 8 +-- .../customtask/cmem_plugin_ssh-Upload.md | 8 +-- ...in_validation-validate-ValidateEntities.md | 10 ++-- ...lugin_validation-validate-ValidateGraph.md | 4 +- .../cmem_plugin_wfreports_get_report.md | 4 +- .../customtask/cmem_plugin_yaml-parse.md | 4 +- .../build/reference/customtask/combine-csv.md | 4 +- .../customtask/deleteProjectFiles.md | 6 ++- .../reference/customtask/downloadFile.md | 4 +- ...ataPlatformGraphStoreFileUploadOperator.md | 4 +- .../customtask/eccencaRestOperator.md | 15 +++--- .../reference/customtask/getProjectFiles.md | 4 +- docs/build/reference/customtask/index.md | 2 + .../reference/customtask/setParameters.md | 4 +- .../reference/customtask/shacl-pyshacl.md | 4 +- .../customtask/sparqlCopyOperator.md | 4 +- .../customtask/sparqlSelectOperator.md | 4 +- .../customtask/sparqlUpdateOperator.md | 8 +-- .../customtask/sqlUpdateQueryOperator.md | 4 +- .../customtask/tripleRequestOperator.md | 4 +- .../customtask/ucumNormalizationTask.md | 6 ++- .../customtask/validateXsdOperator.md | 4 +- .../reference/customtask/xsltOperator.md | 10 ++-- docs/build/reference/dataset/Hive.md | 4 +- docs/build/reference/dataset/Jdbc.md | 9 ++-- .../reference/dataset/LocalInternalDataset.md | 4 +- docs/build/reference/dataset/SnowflakeJdbc.md | 4 +- docs/build/reference/dataset/alignment.md | 4 +- docs/build/reference/dataset/avro.md | 4 +- docs/build/reference/dataset/binaryFile.md | 4 +- 
docs/build/reference/dataset/csv.md | 4 +- .../reference/dataset/eccencaDataPlatform.md | 4 +- docs/build/reference/dataset/excel.md | 4 +- docs/build/reference/dataset/file.md | 4 +- .../reference/dataset/googlespreadsheet.md | 6 ++- docs/build/reference/dataset/inMemory.md | 4 +- docs/build/reference/dataset/index.md | 2 + docs/build/reference/dataset/internal.md | 4 +- docs/build/reference/dataset/json.md | 4 +- docs/build/reference/dataset/multiCsv.md | 4 +- docs/build/reference/dataset/neo4j.md | 6 ++- .../reference/dataset/office365preadsheet.md | 4 +- docs/build/reference/dataset/orc.md | 4 +- docs/build/reference/dataset/parquet.md | 4 +- docs/build/reference/dataset/sparkView.md | 4 +- .../build/reference/dataset/sparqlEndpoint.md | 4 +- docs/build/reference/dataset/sqlEndpoint.md | 5 +- docs/build/reference/dataset/text.md | 4 +- docs/build/reference/dataset/xml.md | 4 +- .../PhysicalQuantitiesDistance.md | 4 +- .../distancemeasure/cjkReadingDistance.md | 4 +- .../distancemeasure/constantDistance.md | 4 +- .../build/reference/distancemeasure/cosine.md | 4 +- docs/build/reference/distancemeasure/date.md | 4 +- .../reference/distancemeasure/dateTime.md | 4 +- docs/build/reference/distancemeasure/dice.md | 4 +- .../reference/distancemeasure/equality.md | 4 +- .../reference/distancemeasure/greaterThan.md | 4 +- docs/build/reference/distancemeasure/index.md | 2 + .../reference/distancemeasure/inequality.md | 4 +- .../distancemeasure/insideNumericInterval.md | 4 +- .../reference/distancemeasure/isSubstring.md | 4 +- .../reference/distancemeasure/jaccard.md | 4 +- docs/build/reference/distancemeasure/jaro.md | 4 +- .../reference/distancemeasure/jaroWinkler.md | 4 +- .../distancemeasure/koreanPhonemeDistance.md | 4 +- .../distancemeasure/koreanTranslitDistance.md | 4 +- .../reference/distancemeasure/levenshtein.md | 4 +- .../distancemeasure/levenshteinDistance.md | 4 +- .../reference/distancemeasure/lowerThan.md | 4 +- docs/build/reference/distancemeasure/num.md 
| 4 +- .../distancemeasure/numericEquality.md | 4 +- .../build/reference/distancemeasure/qGrams.md | 4 +- .../distancemeasure/relaxedEquality.md | 4 +- .../reference/distancemeasure/softjaccard.md | 4 +- .../reference/distancemeasure/startsWith.md | 4 +- .../distancemeasure/substringDistance.md | 4 +- .../distancemeasure/tokenwiseDistance.md | 4 +- docs/build/reference/distancemeasure/wgs84.md | 4 +- docs/build/reference/index.md | 2 + .../reference/transformer/Combine/concat.md | 6 ++- .../transformer/Combine/concatMultiValues.md | 8 +-- .../transformer/Combine/concatPairwise.md | 4 +- .../reference/transformer/Combine/merge.md | 4 +- .../transformer/Conditional/containsAllOf.md | 4 +- .../transformer/Conditional/containsAnyOf.md | 4 +- .../transformer/Conditional/ifContains.md | 4 +- .../transformer/Conditional/ifExists.md | 4 +- .../transformer/Conditional/ifMatchesRegex.md | 4 +- .../Conditional/negateTransformer.md | 4 +- .../transformer/Conversion/convertCharset.md | 4 +- .../transformer/Date/compareDates.md | 4 +- .../reference/transformer/Date/currentDate.md | 4 +- .../transformer/Date/datetoTimestamp.md | 4 +- .../reference/transformer/Date/duration.md | 4 +- .../transformer/Date/durationInDays.md | 4 +- .../transformer/Date/durationInSeconds.md | 4 +- .../transformer/Date/durationInYears.md | 4 +- .../transformer/Date/numberToDuration.md | 4 +- .../reference/transformer/Date/parseDate.md | 4 +- .../reference/transformer/Date/timeToDate.md | 4 +- .../reference/transformer/Excel/Excel_ABS.md | 4 +- .../reference/transformer/Excel/Excel_ACOS.md | 4 +- .../transformer/Excel/Excel_ACOSH.md | 4 +- .../reference/transformer/Excel/Excel_AND.md | 4 +- .../reference/transformer/Excel/Excel_ASIN.md | 4 +- .../transformer/Excel/Excel_ASINH.md | 4 +- .../reference/transformer/Excel/Excel_ATAN.md | 4 +- .../transformer/Excel/Excel_ATAN2.md | 4 +- .../transformer/Excel/Excel_ATANH.md | 4 +- .../transformer/Excel/Excel_AVEDEV.md | 4 +- 
.../transformer/Excel/Excel_AVERAGE.md | 4 +- .../transformer/Excel/Excel_AVERAGEA.md | 4 +- .../transformer/Excel/Excel_CEILING.md | 4 +- .../transformer/Excel/Excel_CHOOSE.md | 4 +- .../transformer/Excel/Excel_CLEAN.md | 4 +- .../reference/transformer/Excel/Excel_CODE.md | 4 +- .../transformer/Excel/Excel_COMBIN.md | 4 +- .../transformer/Excel/Excel_CORREL.md | 4 +- .../reference/transformer/Excel/Excel_COS.md | 4 +- .../reference/transformer/Excel/Excel_COSH.md | 4 +- .../transformer/Excel/Excel_COUNT.md | 4 +- .../transformer/Excel/Excel_COUNTA.md | 4 +- .../transformer/Excel/Excel_COVAR.md | 4 +- .../transformer/Excel/Excel_DEGREES.md | 4 +- .../transformer/Excel/Excel_DEVSQ.md | 4 +- .../reference/transformer/Excel/Excel_EVEN.md | 4 +- .../transformer/Excel/Excel_EXACT.md | 4 +- .../reference/transformer/Excel/Excel_EXP.md | 4 +- .../reference/transformer/Excel/Excel_FACT.md | 4 +- .../transformer/Excel/Excel_FALSE.md | 4 +- .../reference/transformer/Excel/Excel_FIND.md | 4 +- .../transformer/Excel/Excel_FLOOR.md | 4 +- .../transformer/Excel/Excel_FORECAST.md | 4 +- .../reference/transformer/Excel/Excel_FV.md | 4 +- .../transformer/Excel/Excel_GEOMEAN.md | 4 +- .../reference/transformer/Excel/Excel_IF.md | 4 +- .../reference/transformer/Excel/Excel_INT.md | 4 +- .../transformer/Excel/Excel_INTERCEPT.md | 4 +- .../reference/transformer/Excel/Excel_IPMT.md | 4 +- .../reference/transformer/Excel/Excel_IRR.md | 4 +- .../transformer/Excel/Excel_LARGE.md | 4 +- .../reference/transformer/Excel/Excel_LEFT.md | 4 +- .../reference/transformer/Excel/Excel_LN.md | 4 +- .../reference/transformer/Excel/Excel_LOG.md | 4 +- .../transformer/Excel/Excel_LOG10.md | 4 +- .../reference/transformer/Excel/Excel_MAX.md | 4 +- .../reference/transformer/Excel/Excel_MAXA.md | 4 +- .../transformer/Excel/Excel_MEDIAN.md | 4 +- .../reference/transformer/Excel/Excel_MID.md | 4 +- .../reference/transformer/Excel/Excel_MIN.md | 4 +- .../reference/transformer/Excel/Excel_MINA.md | 4 +- 
.../reference/transformer/Excel/Excel_MIRR.md | 4 +- .../reference/transformer/Excel/Excel_MOD.md | 4 +- .../reference/transformer/Excel/Excel_MODE.md | 4 +- .../transformer/Excel/Excel_NORMDIST.md | 4 +- .../transformer/Excel/Excel_NORMINV.md | 4 +- .../transformer/Excel/Excel_NORMSDIST.md | 4 +- .../transformer/Excel/Excel_NORMSINV.md | 4 +- .../reference/transformer/Excel/Excel_NOT.md | 4 +- .../reference/transformer/Excel/Excel_NPER.md | 4 +- .../reference/transformer/Excel/Excel_NPV.md | 4 +- .../reference/transformer/Excel/Excel_ODD.md | 4 +- .../reference/transformer/Excel/Excel_OR.md | 4 +- .../transformer/Excel/Excel_PEARSON.md | 4 +- .../transformer/Excel/Excel_PERCENTILE.md | 4 +- .../transformer/Excel/Excel_PERCENTRANK.md | 4 +- .../reference/transformer/Excel/Excel_PI.md | 4 +- .../reference/transformer/Excel/Excel_PMT.md | 4 +- .../transformer/Excel/Excel_POISSON.md | 4 +- .../transformer/Excel/Excel_POWER.md | 4 +- .../reference/transformer/Excel/Excel_PPMT.md | 4 +- .../transformer/Excel/Excel_PRODUCT.md | 4 +- .../transformer/Excel/Excel_PROPER.md | 4 +- .../reference/transformer/Excel/Excel_PV.md | 4 +- .../transformer/Excel/Excel_RADIANS.md | 4 +- .../reference/transformer/Excel/Excel_RAND.md | 4 +- .../reference/transformer/Excel/Excel_RANK.md | 4 +- .../reference/transformer/Excel/Excel_RATE.md | 4 +- .../transformer/Excel/Excel_REPLACE.md | 4 +- .../reference/transformer/Excel/Excel_REPT.md | 4 +- .../transformer/Excel/Excel_RIGHT.md | 4 +- .../transformer/Excel/Excel_ROMAN.md | 4 +- .../transformer/Excel/Excel_ROUND.md | 4 +- .../transformer/Excel/Excel_ROUNDDOWN.md | 4 +- .../transformer/Excel/Excel_ROUNDUP.md | 4 +- .../transformer/Excel/Excel_SEARCH.md | 4 +- .../reference/transformer/Excel/Excel_SIGN.md | 4 +- .../reference/transformer/Excel/Excel_SIN.md | 4 +- .../reference/transformer/Excel/Excel_SINH.md | 4 +- .../transformer/Excel/Excel_SLOPE.md | 4 +- .../transformer/Excel/Excel_SMALL.md | 4 +- 
.../reference/transformer/Excel/Excel_SQRT.md | 4 +- .../transformer/Excel/Excel_STANDARDIZE.md | 4 +- .../transformer/Excel/Excel_STDEV.md | 4 +- .../transformer/Excel/Excel_STDEVA.md | 4 +- .../transformer/Excel/Excel_STDEVP.md | 4 +- .../transformer/Excel/Excel_STDEVPA.md | 4 +- .../transformer/Excel/Excel_SUBSTITUTE.md | 4 +- .../reference/transformer/Excel/Excel_SUM.md | 4 +- .../transformer/Excel/Excel_SUMPRODUCT.md | 4 +- .../transformer/Excel/Excel_SUMSQ.md | 4 +- .../transformer/Excel/Excel_SUMX2MY2.md | 4 +- .../transformer/Excel/Excel_SUMX2PY2.md | 4 +- .../transformer/Excel/Excel_SUMXMY2.md | 4 +- .../reference/transformer/Excel/Excel_TAN.md | 4 +- .../reference/transformer/Excel/Excel_TANH.md | 4 +- .../transformer/Excel/Excel_TDIST.md | 4 +- .../reference/transformer/Excel/Excel_TRUE.md | 4 +- .../transformer/Excel/Excel_TRUNC.md | 4 +- .../reference/transformer/Excel/Excel_VAR.md | 4 +- .../reference/transformer/Excel/Excel_VARA.md | 4 +- .../reference/transformer/Excel/Excel_VARP.md | 4 +- .../transformer/Excel/Excel_VARPA.md | 4 +- .../transformer/Extract/regexExtract.md | 4 +- .../transformer/Filter/filterByLength.md | 4 +- .../transformer/Filter/filterByRegex.md | 4 +- .../Filter/removeDefaultStopWords.md | 4 +- .../transformer/Filter/removeEmptyValues.md | 4 +- .../Filter/removeRemoteStopWords.md | 4 +- .../transformer/Filter/removeStopWords.md | 4 +- .../transformer/Filter/removeValues.md | 4 +- .../transformer/Geo/RetrieveCoordinates.md | 12 +++-- .../transformer/Geo/RetrieveLatitude.md | 12 +++-- .../transformer/Geo/RetrieveLongitude.md | 12 +++-- .../transformer/Linguistic/NYSIIS.md | 4 +- .../transformer/Linguistic/metaphone.md | 4 +- .../transformer/Linguistic/normalizeChars.md | 4 +- .../transformer/Linguistic/soundex.md | 4 +- .../reference/transformer/Linguistic/stem.md | 4 +- .../transformer/Metadata/fileHash.md | 4 +- .../Metadata/inputFileAttributes.md | 4 +- .../Metadata/inputTaskAttributes.md | 4 +- 
.../transformer/Normalize/alphaReduce.md | 4 +- .../transformer/Normalize/camelCase.md | 4 +- .../transformer/Normalize/capitalize.md | 4 +- .../transformer/Normalize/htmlCleaner.md | 4 +- .../transformer/Normalize/lowerCase.md | 4 +- .../transformer/Normalize/removeBlanks.md | 4 +- .../transformer/Normalize/removeDuplicates.md | 4 +- .../Normalize/removeParentheses.md | 4 +- .../Normalize/removeSpecialChars.md | 4 +- .../transformer/Normalize/sortWords.md | 6 ++- .../reference/transformer/Normalize/trim.md | 4 +- .../transformer/Normalize/upperCase.md | 4 +- .../reference/transformer/Normalize/uriFix.md | 4 +- .../transformer/Normalize/urlEncode.md | 4 +- .../Numeric/PhysicalQuantitiesNormalizer.md | 4 +- .../transformer/Numeric/aggregateNumbers.md | 4 +- .../Numeric/cmem-plugin-number-conversion.md | 4 +- .../transformer/Numeric/compareNumbers.md | 4 +- .../Numeric/extractPhysicalQuantity.md | 4 +- .../transformer/Numeric/formatNumber.md | 4 +- .../reference/transformer/Numeric/log.md | 4 +- .../transformer/Numeric/numOperation.md | 4 +- .../transformer/Numeric/numReduce.md | 4 +- .../transformer/Parser/DateTypeParser.md | 4 +- .../transformer/Parser/FloatTypeParser.md | 4 +- .../transformer/Parser/GeoCoordinateParser.md | 4 +- .../transformer/Parser/GeoLocationParser.md | 4 +- .../transformer/Parser/IntegerParser.md | 4 +- .../transformer/Parser/IsinParser.md | 4 +- .../transformer/Parser/SkosTypeParser.md | 4 +- .../transformer/Parser/StringParser.md | 4 +- .../reference/transformer/Replace/excelMap.md | 4 +- .../reference/transformer/Replace/map.md | 4 +- .../Replace/mapWithDefaultInput.md | 4 +- .../transformer/Replace/regexReplace.md | 4 +- .../reference/transformer/Replace/replace.md | 6 ++- .../transformer/Selection/coalesce.md | 4 +- .../transformer/Selection/regexSelect.md | 4 +- .../reference/transformer/Sequence/count.md | 4 +- .../transformer/Sequence/getValueByIndex.md | 5 +- .../reference/transformer/Sequence/sort.md | 4 +- 
.../transformer/Sequence/toSequenceIndex.md | 4 +- .../transformer/Substring/stripPostfix.md | 4 +- .../transformer/Substring/stripPrefix.md | 4 +- .../transformer/Substring/stripUriPrefix.md | 4 +- .../transformer/Substring/substring.md | 4 +- .../transformer/Substring/untilCharacter.md | 4 +- .../Template/TemplateTransformer.md | 12 +++-- .../Tokenization/camelcasetokenizer.md | 4 +- .../transformer/Tokenization/tokenize.md | 4 +- .../Uncategorized/cmem-plugin-jq-transform.md | 4 +- .../cmem_plugin_currencies-transform.md | 4 +- .../Validation/validateDateAfter.md | 4 +- .../Validation/validateDateRange.md | 4 +- .../Validation/validateNumberOfValues.md | 4 +- .../Validation/validateNumericRange.md | 4 +- .../transformer/Validation/validateRegex.md | 4 +- .../transformer/Value/cmem-plugin-ulid.md | 4 +- .../cmem_plugin_uuid-plugin_uuid-UUID1.md | 4 +- ...em_plugin_uuid-plugin_uuid-UUID1ToUUID6.md | 4 +- .../cmem_plugin_uuid-plugin_uuid-UUID3.md | 4 +- .../cmem_plugin_uuid-plugin_uuid-UUID4.md | 4 +- .../cmem_plugin_uuid-plugin_uuid-UUID5.md | 4 +- .../cmem_plugin_uuid-plugin_uuid-UUID6.md | 4 +- .../cmem_plugin_uuid-plugin_uuid-UUID7.md | 4 +- .../cmem_plugin_uuid-plugin_uuid-UUID8.md | 4 +- ...mem_plugin_uuid-plugin_uuid-UUIDConvert.md | 4 +- ...mem_plugin_uuid-plugin_uuid-UUIDVersion.md | 4 +- .../reference/transformer/Value/constant.md | 4 +- .../transformer/Value/constantUri.md | 4 +- .../transformer/Value/datasetParameter.md | 4 +- .../transformer/Value/defaultValue.md | 4 +- .../reference/transformer/Value/emptyValue.md | 4 +- .../reference/transformer/Value/inputHash.md | 4 +- .../transformer/Value/randomNumber.md | 4 +- .../transformer/Value/readParameter.md | 4 +- .../build/reference/transformer/Value/uuid.md | 4 +- docs/build/reference/transformer/index.md | 2 + .../activity-reference/index.md | 54 +++++++++---------- .../explore/dataplatform/application-full.md | 20 +++---- .../dataplatform/application-oauth-full.md | 2 +- 
.../explore/graph-resource-pattern/index.md | 2 +- docs/develop/python-plugins/setup/index.md | 2 +- .../datatype-reference/index.md | 6 ++- .../node-shapes/index.md | 2 + .../property-shapes/index.md | 2 + .../graph-exploration/index.md | 41 +++++++------- .../corporate-memory-21-04/index.md | 8 +-- .../corporate-memory-24-2/index.md | 2 +- .../corporate-memory-25-3/index.md | 1 + docs/testing.md | 12 +++-- 410 files changed, 1317 insertions(+), 507 deletions(-) diff --git a/docs/automate/cmemc-command-line-interface/command-reference/admin/acl/index.md b/docs/automate/cmemc-command-line-interface/command-reference/admin/acl/index.md index 31b464ae7..1256b3527 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/admin/acl/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/admin/acl/index.md @@ -6,7 +6,9 @@ tags: - Security - cmemc --- + # admin acl Command Group + List, create, delete and modify and review access conditions. diff --git a/docs/automate/cmemc-command-line-interface/command-reference/admin/client/index.md b/docs/automate/cmemc-command-line-interface/command-reference/admin/client/index.md index 40df05985..485794255 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/admin/client/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/admin/client/index.md @@ -7,7 +7,9 @@ tags: - Security - cmemc --- + # admin client Command Group + List client accounts, get or generate client account secrets. 
diff --git a/docs/automate/cmemc-command-line-interface/command-reference/admin/index.md b/docs/automate/cmemc-command-line-interface/command-reference/admin/index.md index fcdf08852..723b97d90 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/admin/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/admin/index.md @@ -5,7 +5,9 @@ icon: material/key-link tags: - cmemc --- + # admin Command Group + Import bootstrap data, backup/restore workspace or get status. diff --git a/docs/automate/cmemc-command-line-interface/command-reference/admin/metrics/index.md b/docs/automate/cmemc-command-line-interface/command-reference/admin/metrics/index.md index c52c4b772..9b5645fd4 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/admin/metrics/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/admin/metrics/index.md @@ -5,7 +5,9 @@ icon: material/chart-line-variant tags: - cmemc --- + # admin metrics Command Group + List and get metrics. diff --git a/docs/automate/cmemc-command-line-interface/command-reference/admin/migration/index.md b/docs/automate/cmemc-command-line-interface/command-reference/admin/migration/index.md index 2ac6119b8..a38c279eb 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/admin/migration/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/admin/migration/index.md @@ -5,7 +5,9 @@ icon: material/database-arrow-up-outline tags: - cmemc --- + # admin migration Command Group + List and apply migration recipes. 
diff --git a/docs/automate/cmemc-command-line-interface/command-reference/admin/store/index.md b/docs/automate/cmemc-command-line-interface/command-reference/admin/store/index.md index 443245576..ea75c24dd 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/admin/store/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/admin/store/index.md @@ -6,7 +6,9 @@ tags: - SPARQL - cmemc --- + # admin store Command Group + Import, export and bootstrap the knowledge graph store. diff --git a/docs/automate/cmemc-command-line-interface/command-reference/admin/user/index.md b/docs/automate/cmemc-command-line-interface/command-reference/admin/user/index.md index 28cca892e..3460e3972 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/admin/user/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/admin/user/index.md @@ -7,7 +7,9 @@ tags: - Security - cmemc --- + # admin user Command Group + List, create, delete and modify user accounts. diff --git a/docs/automate/cmemc-command-line-interface/command-reference/admin/workspace/index.md b/docs/automate/cmemc-command-line-interface/command-reference/admin/workspace/index.md index f8d0f6f69..a83076c07 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/admin/workspace/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/admin/workspace/index.md @@ -5,7 +5,9 @@ icon: material/folder-multiple-outline tags: - cmemc --- + # admin workspace Command Group + Import, export and reload the project workspace. 
diff --git a/docs/automate/cmemc-command-line-interface/command-reference/admin/workspace/python/index.md b/docs/automate/cmemc-command-line-interface/command-reference/admin/workspace/python/index.md index 251d86113..7d0662593 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/admin/workspace/python/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/admin/workspace/python/index.md @@ -6,7 +6,9 @@ tags: - Python - cmemc --- + # admin workspace python Command Group + List, install, or uninstall python packages. diff --git a/docs/automate/cmemc-command-line-interface/command-reference/config/index.md b/docs/automate/cmemc-command-line-interface/command-reference/config/index.md index 190479152..d110a2baf 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/config/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/config/index.md @@ -6,7 +6,9 @@ tags: - Configuration - cmemc --- + # config Command Group + ```text diff --git a/docs/automate/cmemc-command-line-interface/command-reference/dataset/index.md b/docs/automate/cmemc-command-line-interface/command-reference/dataset/index.md index 57e050f15..efb8e22ae 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/dataset/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/dataset/index.md @@ -5,7 +5,9 @@ icon: eccenca/artefact-dataset tags: - cmemc --- + # dataset Command Group + List, create, delete, inspect, up-/download or open datasets. 
diff --git a/docs/automate/cmemc-command-line-interface/command-reference/dataset/resource/index.md b/docs/automate/cmemc-command-line-interface/command-reference/dataset/resource/index.md index c668607b0..30482a870 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/dataset/resource/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/dataset/resource/index.md @@ -5,7 +5,9 @@ icon: octicons/cross-reference-24 tags: - cmemc --- + # dataset resource Command Group + List, inspect or delete dataset file resources. diff --git a/docs/automate/cmemc-command-line-interface/command-reference/graph/imports/index.md b/docs/automate/cmemc-command-line-interface/command-reference/graph/imports/index.md index 59bc9b6fb..226257fdc 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/graph/imports/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/graph/imports/index.md @@ -6,7 +6,9 @@ tags: - KnowledgeGraph - cmemc --- + # graph imports Command Group + List, create, delete and show graph imports. diff --git a/docs/automate/cmemc-command-line-interface/command-reference/graph/index.md b/docs/automate/cmemc-command-line-interface/command-reference/graph/index.md index c1cc798af..7ff31d2e4 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/graph/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/graph/index.md @@ -6,7 +6,9 @@ tags: - KnowledgeGraph - cmemc --- + # graph Command Group + List, import, export, delete, count, tree or open graphs. 
diff --git a/docs/automate/cmemc-command-line-interface/command-reference/graph/insights/index.md b/docs/automate/cmemc-command-line-interface/command-reference/graph/insights/index.md index 3934666a0..7c164a446 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/graph/insights/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/graph/insights/index.md @@ -5,7 +5,9 @@ icon: eccenca/graph-insights tags: - cmemc --- + # graph insights Command Group + List, create, delete and inspect graph insight snapshots. diff --git a/docs/automate/cmemc-command-line-interface/command-reference/graph/validation/index.md b/docs/automate/cmemc-command-line-interface/command-reference/graph/validation/index.md index f9d5528d1..e9fd8b9ac 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/graph/validation/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/graph/validation/index.md @@ -7,7 +7,9 @@ tags: - Validation - cmemc --- + # graph validation Command Group + Validate resources in a graph. diff --git a/docs/automate/cmemc-command-line-interface/command-reference/index.md b/docs/automate/cmemc-command-line-interface/command-reference/index.md index 0026c6448..1849b17f7 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/index.md @@ -6,7 +6,9 @@ tags: - Reference - cmemc --- + # Command Reference + !!! 
info diff --git a/docs/automate/cmemc-command-line-interface/command-reference/project/file/index.md b/docs/automate/cmemc-command-line-interface/command-reference/project/file/index.md index be1ed4a6d..16d6ef171 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/project/file/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/project/file/index.md @@ -6,7 +6,9 @@ tags: - Files - cmemc --- + # project file Command Group + List, inspect, up-/download or delete project file resources. diff --git a/docs/automate/cmemc-command-line-interface/command-reference/project/index.md b/docs/automate/cmemc-command-line-interface/command-reference/project/index.md index 10a636684..5e0a703da 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/project/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/project/index.md @@ -6,7 +6,9 @@ tags: - Project - cmemc --- + # project Command Group + List, import, export, create, delete or open projects. diff --git a/docs/automate/cmemc-command-line-interface/command-reference/project/variable/index.md b/docs/automate/cmemc-command-line-interface/command-reference/project/variable/index.md index 8e6e29fd0..b0fb759d6 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/project/variable/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/project/variable/index.md @@ -6,7 +6,9 @@ tags: - Variables - cmemc --- + # project variable Command Group + List, create, delete or get data from project variables. 
diff --git a/docs/automate/cmemc-command-line-interface/command-reference/query/index.md b/docs/automate/cmemc-command-line-interface/command-reference/query/index.md index 27246cae7..fa2be429c 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/query/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/query/index.md @@ -6,7 +6,9 @@ tags: - SPARQL - cmemc --- + # query Command Group + List, execute, get status or open SPARQL queries. diff --git a/docs/automate/cmemc-command-line-interface/command-reference/vocabulary/cache/index.md b/docs/automate/cmemc-command-line-interface/command-reference/vocabulary/cache/index.md index c87bd4c99..8a2269062 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/vocabulary/cache/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/vocabulary/cache/index.md @@ -6,7 +6,9 @@ tags: - Vocabulary - cmemc --- + # vocabulary cache Command Group + List und update the vocabulary cache. diff --git a/docs/automate/cmemc-command-line-interface/command-reference/vocabulary/index.md b/docs/automate/cmemc-command-line-interface/command-reference/vocabulary/index.md index e874df055..4591d16c4 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/vocabulary/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/vocabulary/index.md @@ -6,7 +6,9 @@ tags: - Vocabulary - cmemc --- + # vocabulary Command Group + List, (un-)install, import or open vocabs / manage cache. 
diff --git a/docs/automate/cmemc-command-line-interface/command-reference/workflow/index.md b/docs/automate/cmemc-command-line-interface/command-reference/workflow/index.md index 312165d79..ca2fce505 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/workflow/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/workflow/index.md @@ -6,7 +6,9 @@ tags: - Workflow - cmemc --- + # workflow Command Group + List, execute, status or open (io) workflows. diff --git a/docs/automate/cmemc-command-line-interface/command-reference/workflow/scheduler/index.md b/docs/automate/cmemc-command-line-interface/command-reference/workflow/scheduler/index.md index f18beaaa0..be55a806d 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/workflow/scheduler/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/workflow/scheduler/index.md @@ -6,7 +6,9 @@ tags: - Automate - cmemc --- + # workflow scheduler Command Group + List, inspect, enable/disable or open scheduler. 
diff --git a/docs/automate/processing-data-with-variable-input-workflows/index.md b/docs/automate/processing-data-with-variable-input-workflows/index.md index 26e952a1e..c3721a523 100644 --- a/docs/automate/processing-data-with-variable-input-workflows/index.md +++ b/docs/automate/processing-data-with-variable-input-workflows/index.md @@ -26,7 +26,7 @@ This allows for solving all kinds of [☆ Automation](../index.md) tasks when yo - by using the [command line interface](../cmemc-command-line-interface/index.md) ``` shell-session - $ cmemc -c my-cmem project import tutorial-varinput.project.zip varinput + cmemc -c my-cmem project import tutorial-varinput.project.zip varinput ``` ## 1 Install the required vocabularies @@ -81,7 +81,7 @@ For this, you need to use the `workflow io` command: ``` shell-session # process one specific feed xml document -$ cmemc workflow io varinput:process-feed -i feed.xml +cmemc workflow io varinput:process-feed -i feed.xml ``` You can easily automate this for a [list of feeds](feeds.txt) like this: diff --git a/docs/build/extracting-data-from-a-web-api/index.md b/docs/build/extracting-data-from-a-web-api/index.md index 2b2939a32..c162cb0f2 100644 --- a/docs/build/extracting-data-from-a-web-api/index.md +++ b/docs/build/extracting-data-from-a-web-api/index.md @@ -18,7 +18,7 @@ The tutorial is based on the [GitHub API (v3)](https://developer.github.com/v3/) - by using the [command line interface](../../automate/cmemc-command-line-interface/index.md) ``` shell-session - $ cmemc -c my-cmem project import tutorial-webapi.project.zip web-api + cmemc -c my-cmem project import tutorial-webapi.project.zip web-api ``` In order to get familiar with the API, simply fetch an example response with this command: diff --git a/docs/build/integrations/index.md b/docs/build/integrations/index.md index 8a1005a68..ebaa47c92 100644 --- a/docs/build/integrations/index.md +++ b/docs/build/integrations/index.md @@ -7,7 +7,9 @@ tags: - Build - Reference --- + # 
Integrations + The following services and applications can be easily integrated in Corporate Memory workflows: @@ -67,6 +69,7 @@ to interact with any [Azure AI Foundry provided Large Language Models](https://a Load and write Knowledge Graphs to an external GraphDB store by using the [SPARQL endpoint](../../build/reference/dataset/sparqlEndpoint.md) dataset. Query data from GraphDB by using the SPARQL + [Construct](../../build/reference/customtask/sparqlCopyOperator.md), [Select](../../build/reference/customtask/sparqlSelectOperator.md) and [Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. @@ -155,6 +158,7 @@ the [Send Mattermost messages](../../build/reference/customtask/cmem_plugin_matt Load and write Knowledge Graphs to Amazon Neptune by using the [SPARQL endpoint](../../build/reference/dataset/sparqlEndpoint.md) dataset. Query data from Amazon Neptune by using the SPARQL + [Construct](../../build/reference/customtask/sparqlCopyOperator.md), [Select](../../build/reference/customtask/sparqlSelectOperator.md) and [Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. @@ -233,6 +237,7 @@ using the [Search Vector Embeddings](../../build/reference/customtask/cmem_plugi Load and write Knowledge Graphs to an external Qlever store by using the [SPARQL endpoint](../../build/reference/dataset/sparqlEndpoint.md) dataset. Query data from Qlever by using the SPARQL + [Construct](../../build/reference/customtask/sparqlCopyOperator.md), [Select](../../build/reference/customtask/sparqlSelectOperator.md) and [Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. @@ -299,6 +304,7 @@ execute a [SOQL query (Salesforce)](../../build/reference/customtask/cmem_plugin Load and write Knowledge Graphs to an external Tentris store by using the [SPARQL endpoint](../../build/reference/dataset/sparqlEndpoint.md) dataset. 
Query data from Tentris by using the SPARQL + [Construct](../../build/reference/customtask/sparqlCopyOperator.md), [Select](../../build/reference/customtask/sparqlSelectOperator.md) and [Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. @@ -317,6 +323,7 @@ Tentris can be used as the integrated Quad Store as well (beta). Load and write Knowledge Graphs to an external Openlink Virtuoso store by using the [SPARQL endpoint](../../build/reference/dataset/sparqlEndpoint.md) dataset. Query data from Virtuoso by using the SPARQL + [Construct](../../build/reference/customtask/sparqlCopyOperator.md), [Select](../../build/reference/customtask/sparqlSelectOperator.md) and [Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. diff --git a/docs/build/lift-data-from-tabular-data-such-as-csv-xslx-or-database-tables/index.md b/docs/build/lift-data-from-tabular-data-such-as-csv-xslx-or-database-tables/index.md index fe765c3b8..e7fe2d933 100644 --- a/docs/build/lift-data-from-tabular-data-such-as-csv-xslx-or-database-tables/index.md +++ b/docs/build/lift-data-from-tabular-data-such-as-csv-xslx-or-database-tables/index.md @@ -19,7 +19,7 @@ This beginner-level tutorial shows how you can build a Knowledge Graph based on - by using the [command line interface](../../automate/cmemc-command-line-interface/index.md) ``` shell-session - $ cmemc -c my-cmem project import tutorial-csv.project.zip tutorial-csv + cmemc -c my-cmem project import tutorial-csv.project.zip tutorial-csv ``` This step is optional and makes some of the following steps of the tutorial superfluous. 
@@ -89,7 +89,7 @@ The vocabulary contains the classes and properties needed to map the data into t === "cmemc" ``` shell-session - $ cmemc vocabulary import products_vocabulary.nt + cmemc vocabulary import products_vocabulary.nt ``` --- @@ -158,7 +158,7 @@ The vocabulary contains the classes and properties needed to map the data into t The general form of the JDBC connection string is: - ``` + ```text jdbc:://:/ ``` diff --git a/docs/build/reference/aggregator/average.md b/docs/build/reference/aggregator/average.md index 689aa6942..3fe305506 100644 --- a/docs/build/reference/aggregator/average.md +++ b/docs/build/reference/aggregator/average.md @@ -2,9 +2,11 @@ title: "Average" description: "Computes the weighted average." icon: octicons/cross-reference-24 -tags: +tags: --- + # Average + Computes the weighted average. diff --git a/docs/build/reference/aggregator/firstNonEmpty.md b/docs/build/reference/aggregator/firstNonEmpty.md index 347595037..a6b11e3ee 100644 --- a/docs/build/reference/aggregator/firstNonEmpty.md +++ b/docs/build/reference/aggregator/firstNonEmpty.md @@ -2,9 +2,11 @@ title: "First non-empty score" description: "Forwards the first input that provides a non-empty similarity score." icon: octicons/cross-reference-24 -tags: +tags: --- + # First non-empty score + Forwards the first input that provides a non-empty similarity score. diff --git a/docs/build/reference/aggregator/geometricMean.md b/docs/build/reference/aggregator/geometricMean.md index 2fa2d70e4..07d4b5445 100644 --- a/docs/build/reference/aggregator/geometricMean.md +++ b/docs/build/reference/aggregator/geometricMean.md @@ -2,9 +2,11 @@ title: "Geometric mean" description: "Compute the (weighted) geometric mean." icon: octicons/cross-reference-24 -tags: +tags: --- + # Geometric mean + Compute the (weighted) geometric mean. 
diff --git a/docs/build/reference/aggregator/handleMissingValues.md b/docs/build/reference/aggregator/handleMissingValues.md index 39e97f1ea..ad5c35796 100644 --- a/docs/build/reference/aggregator/handleMissingValues.md +++ b/docs/build/reference/aggregator/handleMissingValues.md @@ -2,9 +2,11 @@ title: "Handle missing values" description: "Generates a default similarity score, if no similarity score is provided (e.g., due to missing values). Using this operator can have a performance impact, since it lowers the efficiency of the underlying computation." icon: octicons/cross-reference-24 -tags: +tags: --- + # Handle missing values + Generates a default similarity score, if no similarity score is provided (e.g., due to missing values). Using this operator can have a performance impact, since it lowers the efficiency of the underlying computation. diff --git a/docs/build/reference/aggregator/index.md b/docs/build/reference/aggregator/index.md index f3a309847..8b5789c3a 100644 --- a/docs/build/reference/aggregator/index.md +++ b/docs/build/reference/aggregator/index.md @@ -5,7 +5,9 @@ tags: - Build - Reference --- + # Aggregators + This kind of task aggregates multiple similarity scores. diff --git a/docs/build/reference/aggregator/max.md b/docs/build/reference/aggregator/max.md index 9f57d104b..ad5ba2dbb 100644 --- a/docs/build/reference/aggregator/max.md +++ b/docs/build/reference/aggregator/max.md @@ -2,9 +2,11 @@ title: "Or" description: "At least one input score must be within the threshold. Selects the maximum score." icon: octicons/cross-reference-24 -tags: +tags: --- + # Or + At least one input score must be within the threshold. Selects the maximum score. diff --git a/docs/build/reference/aggregator/min.md b/docs/build/reference/aggregator/min.md index 03a36e0da..7b0b5ba2d 100644 --- a/docs/build/reference/aggregator/min.md +++ b/docs/build/reference/aggregator/min.md @@ -2,9 +2,11 @@ title: "And" description: "All input scores must be within the threshold. 
Selects the minimum score." icon: octicons/cross-reference-24 -tags: +tags: --- + # And + All input scores must be within the threshold. Selects the minimum score. diff --git a/docs/build/reference/aggregator/negate.md b/docs/build/reference/aggregator/negate.md index e333ef88d..208d1cd16 100644 --- a/docs/build/reference/aggregator/negate.md +++ b/docs/build/reference/aggregator/negate.md @@ -2,9 +2,11 @@ title: "Negate" description: "Negates the result of the input comparison. A single input is expected. Using this operator can have a performance impact, since it lowers the efficiency of the underlying computation." icon: octicons/cross-reference-24 -tags: +tags: --- + # Negate + Negates the result of the input comparison. A single input is expected. Using this operator can have a performance impact, since it lowers the efficiency of the underlying computation. diff --git a/docs/build/reference/aggregator/quadraticMean.md b/docs/build/reference/aggregator/quadraticMean.md index e8cca0408..e13f6d1c2 100644 --- a/docs/build/reference/aggregator/quadraticMean.md +++ b/docs/build/reference/aggregator/quadraticMean.md @@ -2,9 +2,11 @@ title: "Euclidian distance" description: "Calculates the Euclidian distance." icon: octicons/cross-reference-24 -tags: +tags: --- + # Euclidian distance + Calculates the Euclidian distance. diff --git a/docs/build/reference/aggregator/scale.md b/docs/build/reference/aggregator/scale.md index bfc7bc017..d28969d8e 100644 --- a/docs/build/reference/aggregator/scale.md +++ b/docs/build/reference/aggregator/scale.md @@ -2,9 +2,11 @@ title: "Scale" description: "Scales a similarity score by a factor." icon: octicons/cross-reference-24 -tags: +tags: --- + # Scale + Scales a similarity score by a factor. 
diff --git a/docs/build/reference/customtask/CancelWorkflow.md b/docs/build/reference/customtask/CancelWorkflow.md index c42a879c0..e771d2365 100644 --- a/docs/build/reference/customtask/CancelWorkflow.md +++ b/docs/build/reference/customtask/CancelWorkflow.md @@ -2,10 +2,12 @@ title: "Cancel Workflow" description: "Cancels a workflow if a specified condition is fulfilled. A typical use case for this operator is to cancel the workflow execution if the input data is empty." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Cancel Workflow + Cancels a workflow if a specified condition is fulfilled. A typical use case for this operator is to cancel the workflow execution if the input data is empty. diff --git a/docs/build/reference/customtask/ConcatenateToFile.md b/docs/build/reference/customtask/ConcatenateToFile.md index cb0234e42..fc5923607 100644 --- a/docs/build/reference/customtask/ConcatenateToFile.md +++ b/docs/build/reference/customtask/ConcatenateToFile.md @@ -2,10 +2,12 @@ title: "Concatenate to file" description: "Concatenates values into a file." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Concatenate to file + Concatenates values into a file. diff --git a/docs/build/reference/customtask/CustomSQLExecution.md b/docs/build/reference/customtask/CustomSQLExecution.md index db650fd1d..af69ed2da 100644 --- a/docs/build/reference/customtask/CustomSQLExecution.md +++ b/docs/build/reference/customtask/CustomSQLExecution.md @@ -2,10 +2,12 @@ title: "Spark SQL query" description: "Executes a custom SQL query on the first input Spark dataframe and returns the result as its output." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Spark SQL query + Executes a custom SQL query on the first input Spark dataframe and returns the result as its output. 
diff --git a/docs/build/reference/customtask/DistinctBy.md b/docs/build/reference/customtask/DistinctBy.md index 24a123a56..b5d6fa1df 100644 --- a/docs/build/reference/customtask/DistinctBy.md +++ b/docs/build/reference/customtask/DistinctBy.md @@ -2,10 +2,12 @@ title: "Distinct by" description: "Removes duplicated entities based on a user-defined path. Note that this operator does not retain the order of the entities." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Distinct by + Removes duplicated entities based on a user-defined path. Note that this operator does not retain the order of the entities. diff --git a/docs/build/reference/customtask/JsonParserOperator.md b/docs/build/reference/customtask/JsonParserOperator.md index 4f0fe9789..12f0da925 100644 --- a/docs/build/reference/customtask/JsonParserOperator.md +++ b/docs/build/reference/customtask/JsonParserOperator.md @@ -2,10 +2,12 @@ title: "Parse JSON" description: "Parses an incoming entity as a JSON dataset. Typically, it is used before a transformation task. Takes exactly one input of which only the first entity is processed." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Parse JSON + Parses an incoming entity as a JSON dataset. Typically, it is used before a transformation task. Takes exactly one input of which only the first entity is processed. diff --git a/docs/build/reference/customtask/Merge.md b/docs/build/reference/customtask/Merge.md index 8ec6a9927..1bec86c94 100644 --- a/docs/build/reference/customtask/Merge.md +++ b/docs/build/reference/customtask/Merge.md @@ -2,10 +2,12 @@ title: "Join tables" description: "Joins a set of inputs into a single table. Expects a list of entity tables and links. All entity tables are joined into the first entity table using the provided links." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Join tables + Joins a set of inputs into a single table. Expects a list of entity tables and links. 
All entity tables are joined into the first entity table using the provided links. diff --git a/docs/build/reference/customtask/MultiTableMerge.md b/docs/build/reference/customtask/MultiTableMerge.md index 36fedc632..8234a4120 100644 --- a/docs/build/reference/customtask/MultiTableMerge.md +++ b/docs/build/reference/customtask/MultiTableMerge.md @@ -2,10 +2,12 @@ title: "Merge tables" description: "Stores sets of instance and mapping inputs as relational tables with the mapping as an n:m relation. Expects a list of entity tables and links. All entity tables have a relation to the first entity table using the provided links." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Merge tables + Stores sets of instance and mapping inputs as relational tables with the mapping as an n:m relation. Expects a list of entity tables and links. All entity tables have a relation to the first entity table using the provided links. diff --git a/docs/build/reference/customtask/Pivot.md b/docs/build/reference/customtask/Pivot.md index 8576d5555..b04936a87 100644 --- a/docs/build/reference/customtask/Pivot.md +++ b/docs/build/reference/customtask/Pivot.md @@ -2,10 +2,12 @@ title: "Pivot" description: "The pivot operator takes data in separate rows, aggregates it and converts it into columns." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Pivot + The pivot operator takes data in separate rows, aggregates it and converts it into columns. diff --git a/docs/build/reference/customtask/Scheduler.md b/docs/build/reference/customtask/Scheduler.md index 4ec52f9aa..80523b26b 100644 --- a/docs/build/reference/customtask/Scheduler.md +++ b/docs/build/reference/customtask/Scheduler.md @@ -2,10 +2,12 @@ title: "Scheduler" description: "Executes a workflow at specified intervals." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Scheduler + The eccenca Build plugin `Scheduler` executes a given workflow at specified intervals. 
diff --git a/docs/build/reference/customtask/SearchAddresses.md b/docs/build/reference/customtask/SearchAddresses.md index 14da6ea34..fb840f1aa 100644 --- a/docs/build/reference/customtask/SearchAddresses.md +++ b/docs/build/reference/customtask/SearchAddresses.md @@ -2,10 +2,12 @@ title: "Search addresses" description: "Looks up locations from textual descriptions using the configured geocoding API. Outputs results as RDF." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Search addresses + **Configuration** @@ -18,18 +20,18 @@ The default configuration is as follows: # url = "https://nominatim.eccenca.com/search" url = "https://photon.komoot.de/api" # url = https://api-adresse.data.gouv.fr/search - + # Additional URL parameters to be attached to all HTTP search requests. Example: '&countrycodes=de&addressdetails=1'. # Will be attached in addition to the parameters set on each search operator directly. searchParameters = "" - + # The minimum pause time between subsequent queries pauseTime = 1s - + # Number of coordinates to be cached in-memory cacheSize = 10 } - + In general, all services adhering to the [Nominatim search API](https://nominatim.org/release-docs/develop/api/Search/) should be usable. Please note that when using public services, the pause time should be set to avoid overloading. diff --git a/docs/build/reference/customtask/SendEMail.md b/docs/build/reference/customtask/SendEMail.md index d0cfc3e0d..b622858a9 100644 --- a/docs/build/reference/customtask/SendEMail.md +++ b/docs/build/reference/customtask/SendEMail.md @@ -2,10 +2,12 @@ title: "Send email" description: "Sends an email using an SMTP server." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Send email + Sends an email using an SMTP server with support for both plain text and HTML formatted messages. 
diff --git a/docs/build/reference/customtask/SparkFunction.md b/docs/build/reference/customtask/SparkFunction.md index 9197aeadc..276ddd840 100644 --- a/docs/build/reference/customtask/SparkFunction.md +++ b/docs/build/reference/customtask/SparkFunction.md @@ -2,10 +2,12 @@ title: "Execute Spark function" description: "Applies a specified Scala function to a specified field." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Execute Spark function + Applies a specified Scala function to a specified field. diff --git a/docs/build/reference/customtask/Template.md b/docs/build/reference/customtask/Template.md index 8dbf6b24b..1fd4b7adf 100644 --- a/docs/build/reference/customtask/Template.md +++ b/docs/build/reference/customtask/Template.md @@ -2,10 +2,12 @@ title: "Evaluate template" description: "Evaluates a template on a sequence of entities. Can be used after a transformation or directly after datasets that output a single table, such as CSV or Excel." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Evaluate template + The template operator supports the Jinja templating language. Documentation about Jinja can be found in the official [Template Designer Documentation](https://jinja.palletsprojects.com/en/2.11.x/templates/). diff --git a/docs/build/reference/customtask/Unpivot.md b/docs/build/reference/customtask/Unpivot.md index 32a0ab22e..ca7b97c47 100644 --- a/docs/build/reference/customtask/Unpivot.md +++ b/docs/build/reference/customtask/Unpivot.md @@ -2,10 +2,12 @@ title: "Unpivot" description: "Given a list of table columns, transforms those columns into attribute-value pairs." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Unpivot + Given a list of table columns, transforms those columns into attribute-value pairs. 
diff --git a/docs/build/reference/customtask/XmlParserOperator.md b/docs/build/reference/customtask/XmlParserOperator.md index f85693747..79ac9a3ee 100644 --- a/docs/build/reference/customtask/XmlParserOperator.md +++ b/docs/build/reference/customtask/XmlParserOperator.md @@ -2,10 +2,12 @@ title: "Parse XML" description: "Takes exactly one input and reads either the defined inputPath or the first value of the first entity as XML document. Then executes the given output entity schema similar to the XML dataset to construct the result entities." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Parse XML + Takes exactly one input and reads either the defined inputPath or the first value of the first entity as XML document. Then executes the given output entity schema similar to the XML dataset to construct the result entities. diff --git a/docs/build/reference/customtask/addProjectFiles.md b/docs/build/reference/customtask/addProjectFiles.md index 8316f77a9..0034efe25 100644 --- a/docs/build/reference/customtask/addProjectFiles.md +++ b/docs/build/reference/customtask/addProjectFiles.md @@ -2,10 +2,12 @@ title: "Add project files" description: "Adds file resources to the project that are piped into the input port." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Add project files + Adds file resources to the project that are piped into the input port. diff --git a/docs/build/reference/customtask/cmem-plugin-jq-workflow.md b/docs/build/reference/customtask/cmem-plugin-jq-workflow.md index 2b4a4356e..edc2c299d 100644 --- a/docs/build/reference/customtask/cmem-plugin-jq-workflow.md +++ b/docs/build/reference/customtask/cmem-plugin-jq-workflow.md @@ -2,11 +2,13 @@ title: "jq" description: "Process a JSON document with a jq filter / program." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # jq + !!! 
note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/cmem_plugin_auth-workflow-auth-OAuth2.md b/docs/build/reference/customtask/cmem_plugin_auth-workflow-auth-OAuth2.md index cf5bc10b1..7b98aee33 100644 --- a/docs/build/reference/customtask/cmem_plugin_auth-workflow-auth-OAuth2.md +++ b/docs/build/reference/customtask/cmem_plugin_auth-workflow-auth-OAuth2.md @@ -2,11 +2,13 @@ title: "OAuth2 Authentication" description: "Provide an OAuth2 access token for other tasks (via config port)." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # OAuth2 Authentication + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/cmem_plugin_graph_insights-Update.md b/docs/build/reference/customtask/cmem_plugin_graph_insights-Update.md index ae62433a1..c77c58786 100644 --- a/docs/build/reference/customtask/cmem_plugin_graph_insights-Update.md +++ b/docs/build/reference/customtask/cmem_plugin_graph_insights-Update.md @@ -2,11 +2,13 @@ title: "Update Graph Insights Snapshots" description: "Update one or more snapshots, optionally selected by affected graph." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Update Graph Insights Snapshots + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/cmem_plugin_graphql-workflow-graphql-GraphQLPlugin.md b/docs/build/reference/customtask/cmem_plugin_graphql-workflow-graphql-GraphQLPlugin.md index 84a51de24..0cdbb69a0 100644 --- a/docs/build/reference/customtask/cmem_plugin_graphql-workflow-graphql-GraphQLPlugin.md +++ b/docs/build/reference/customtask/cmem_plugin_graphql-workflow-graphql-GraphQLPlugin.md @@ -2,11 +2,13 @@ title: "GraphQL query" description: "Executes a custom GraphQL query to a GraphQL endpoint and saves result to a JSON dataset." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # GraphQL query + !!! 
note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/cmem_plugin_irdi-workflow-irdi_plugin-IrdiPlugin.md b/docs/build/reference/customtask/cmem_plugin_irdi-workflow-irdi_plugin-IrdiPlugin.md index 586798d86..6707706f7 100644 --- a/docs/build/reference/customtask/cmem_plugin_irdi-workflow-irdi_plugin-IrdiPlugin.md +++ b/docs/build/reference/customtask/cmem_plugin_irdi-workflow-irdi_plugin-IrdiPlugin.md @@ -2,11 +2,13 @@ title: "Generate base36 IRDIs" description: "Create unique ECLASS IRDIs." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Generate base36 IRDIs + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/cmem_plugin_jira-JqlQuery.md b/docs/build/reference/customtask/cmem_plugin_jira-JqlQuery.md index c42d05ce4..c741afb46 100644 --- a/docs/build/reference/customtask/cmem_plugin_jira-JqlQuery.md +++ b/docs/build/reference/customtask/cmem_plugin_jira-JqlQuery.md @@ -2,11 +2,13 @@ title: "JQL query" description: "Search and retrieve JIRA issues." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # JQL query + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/cmem_plugin_kafka-ReceiveMessages.md b/docs/build/reference/customtask/cmem_plugin_kafka-ReceiveMessages.md index 629e6e5c8..b1cac42af 100644 --- a/docs/build/reference/customtask/cmem_plugin_kafka-ReceiveMessages.md +++ b/docs/build/reference/customtask/cmem_plugin_kafka-ReceiveMessages.md @@ -2,11 +2,13 @@ title: "Kafka Consumer (Receive Messages)" description: "Reads messages from a Kafka topic and saves it to a messages dataset (Consumer)." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Kafka Consumer (Receive Messages) + !!! 
note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/cmem_plugin_kafka-SendMessages.md b/docs/build/reference/customtask/cmem_plugin_kafka-SendMessages.md index e65bad336..69a75540d 100644 --- a/docs/build/reference/customtask/cmem_plugin_kafka-SendMessages.md +++ b/docs/build/reference/customtask/cmem_plugin_kafka-SendMessages.md @@ -2,11 +2,13 @@ title: "Kafka Producer (Send Messages)" description: "Reads a messages dataset and sends records to a Kafka topic (Producer)." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Kafka Producer (Send Messages) + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/cmem_plugin_kubernetes-Execute.md b/docs/build/reference/customtask/cmem_plugin_kubernetes-Execute.md index 6ab16a826..d0508c946 100644 --- a/docs/build/reference/customtask/cmem_plugin_kubernetes-Execute.md +++ b/docs/build/reference/customtask/cmem_plugin_kubernetes-Execute.md @@ -2,11 +2,13 @@ title: "Execute a command in a kubernetes pod" description: "Connect to a cluster, execute a command and gather the output." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Execute a command in a kubernetes pod + !!! 
note inline end "Python Plugin" @@ -22,6 +24,7 @@ This plugin enables execution of commands inside Kubernetes pods and captures th - Supports multiple connection types: - **In-cluster**: Uses the service account kubernetes gives to pods (for plugins running inside k8s) + - **Explicit config**: Uses a YAML kubeconfig file for external connections - Executes shell commands in specified pods within namespaces - Captures both stdout and stderr output diff --git a/docs/build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md b/docs/build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md index ee591289f..99d5d0064 100644 --- a/docs/build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md +++ b/docs/build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md @@ -2,11 +2,13 @@ title: "Create Embeddings" description: "Fetch and output LLM created embeddings from input entities." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Create Embeddings + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md b/docs/build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md index 8a07710ec..4e1ae72a0 100644 --- a/docs/build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md +++ b/docs/build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md @@ -2,11 +2,13 @@ title: "Execute Instructions" description: "Send instructions (prompt) to an LLM and process the result." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Execute Instructions + !!! 
note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/cmem_plugin_loopwf-task-StartWorkflow.md b/docs/build/reference/customtask/cmem_plugin_loopwf-task-StartWorkflow.md index 2ada6d81d..73c1c4288 100644 --- a/docs/build/reference/customtask/cmem_plugin_loopwf-task-StartWorkflow.md +++ b/docs/build/reference/customtask/cmem_plugin_loopwf-task-StartWorkflow.md @@ -2,11 +2,13 @@ title: "Start Workflow per Entity" description: "Loop over the output of a task and start a sub-workflow for each entity." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Start Workflow per Entity + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/cmem_plugin_mattermost.md b/docs/build/reference/customtask/cmem_plugin_mattermost.md index d0c51d068..d1236352d 100644 --- a/docs/build/reference/customtask/cmem_plugin_mattermost.md +++ b/docs/build/reference/customtask/cmem_plugin_mattermost.md @@ -2,11 +2,13 @@ title: "Send Mattermost messages" description: "Send messages to Mattermost channels and/or users." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Send Mattermost messages + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/cmem_plugin_nextcloud-Download.md b/docs/build/reference/customtask/cmem_plugin_nextcloud-Download.md index a6917aaae..c57d00375 100644 --- a/docs/build/reference/customtask/cmem_plugin_nextcloud-Download.md +++ b/docs/build/reference/customtask/cmem_plugin_nextcloud-Download.md @@ -2,11 +2,13 @@ title: "Download Nextcloud files" description: "Download files from a given Nextcloud instance." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Download Nextcloud files + !!! note inline end "Python Plugin" @@ -22,13 +24,13 @@ directory from which files should be downloaded. Additionally, you may define fi include or exclude specific files within the selected directory. 
The files are not downloaded to the project resources, but are only available within the workflow itself. -#### Nextcloud List Files input +## Nextcloud List Files input If this workflow has an input, it will take the data that comes in instead of the selected values. This works only with the schema used in the **Nextcloud List Files** Plugin. Make sure you still add the appropriate URL, identification and token. -#### Important +### Important To establish a secure connection, you must generate a dedicated [app-specific password and username](https://docs.nextcloud.com/server/latest/user_manual/de/session_management.html) in the Security section of your Nextcloud account settings. Do not use your standard login diff --git a/docs/build/reference/customtask/cmem_plugin_nextcloud-List.md b/docs/build/reference/customtask/cmem_plugin_nextcloud-List.md index e2d46cd30..77be4e742 100644 --- a/docs/build/reference/customtask/cmem_plugin_nextcloud-List.md +++ b/docs/build/reference/customtask/cmem_plugin_nextcloud-List.md @@ -2,11 +2,13 @@ title: "List Nextcloud files" description: "List directories and files from a given Nextcloud folder." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # List Nextcloud files + !!! note inline end "Python Plugin" @@ -22,7 +24,7 @@ Given the URL of the target Nextcloud instance along with your credentials, you directory from which data should be extracted. Additionally, you may define file patterns to include or exclude specific files within the selected directory. -#### Important +## Important To establish a secure connection, you must generate a dedicated [app-specific password and username](https://docs.nextcloud.com/server/latest/user_manual/de/session_management.html) in the Security section of your Nextcloud account settings. 
Do not use your standard login diff --git a/docs/build/reference/customtask/cmem_plugin_nextcloud-Upload.md b/docs/build/reference/customtask/cmem_plugin_nextcloud-Upload.md index bc5259d28..f3233ca85 100644 --- a/docs/build/reference/customtask/cmem_plugin_nextcloud-Upload.md +++ b/docs/build/reference/customtask/cmem_plugin_nextcloud-Upload.md @@ -2,11 +2,13 @@ title: "Upload files to Nextcloud" description: "Upload files to a given Nextcloud instance." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Upload files to Nextcloud + !!! note inline end "Python Plugin" @@ -20,12 +22,12 @@ This workflow task uploads files to a specified Nextcloud service instance. Given the URL of the target Nextcloud instance along with your credentials, you can specify any directory to which files should be uploaded. -### Input +## Input If this workflow has an input, it will take the data that comes in instead of the selected source file. -#### Important +### Important To establish a secure connection, you must generate a dedicated [app-specific password and username](https://docs.nextcloud.com/server/latest/user_manual/de/session_management.html) in the Security section of your Nextcloud account settings. Do not use your standard login diff --git a/docs/build/reference/customtask/cmem_plugin_office365-Download.md b/docs/build/reference/customtask/cmem_plugin_office365-Download.md index 4cbdadfe8..956864a3c 100644 --- a/docs/build/reference/customtask/cmem_plugin_office365-Download.md +++ b/docs/build/reference/customtask/cmem_plugin_office365-Download.md @@ -2,11 +2,13 @@ title: "Download Office 365 Files" description: "Download files from Microsoft OneDrive or Sites" icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Download Office 365 Files + !!! note inline end "Python Plugin" @@ -28,7 +30,7 @@ Admin consent is required to activate these permissions. 
With this setup, anyone with the secret can access all users' OneDrives and all Sharepoint/Team sites. -#### Important +## Important Make sure only trusted admins can create or manage secrets! Whoever holds the secrets has all the access to granted resources so best not to distribute diff --git a/docs/build/reference/customtask/cmem_plugin_office365-List.md b/docs/build/reference/customtask/cmem_plugin_office365-List.md index 529d3c0d4..099da60d9 100644 --- a/docs/build/reference/customtask/cmem_plugin_office365-List.md +++ b/docs/build/reference/customtask/cmem_plugin_office365-List.md @@ -2,11 +2,13 @@ title: "List Office 365 Files" description: "List files from OneDrive or Sites" icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # List Office 365 Files + !!! note inline end "Python Plugin" @@ -28,7 +30,7 @@ Admin consent is required to activate these permissions. With this setup, anyone with the secret can access all users' OneDrives and all Sharepoint/Team sites. -#### Important +## Important Make sure only trusted admins can create or manage secrets! Whoever holds the secrets has all the access to granted resources so best not to distribute diff --git a/docs/build/reference/customtask/cmem_plugin_office365-Upload.md b/docs/build/reference/customtask/cmem_plugin_office365-Upload.md index e4216ee6a..46cd0d817 100644 --- a/docs/build/reference/customtask/cmem_plugin_office365-Upload.md +++ b/docs/build/reference/customtask/cmem_plugin_office365-Upload.md @@ -2,11 +2,13 @@ title: "Office 365 Upload Files" description: "Upload files to OneDrive or a site Sharepoint" icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Office 365 Upload Files + !!! note inline end "Python Plugin" @@ -28,7 +30,7 @@ Admin consent is required to activate these permissions. With this setup, anyone with the secret can access all users' OneDrives and all Sharepoint/Team sites. 
-#### Important +## Important Make sure only trusted admins can create or manage secrets! Whoever holds the secrets has all the access to granted resources so best not to distribute diff --git a/docs/build/reference/customtask/cmem_plugin_parameters-ParametersPlugin.md b/docs/build/reference/customtask/cmem_plugin_parameters-ParametersPlugin.md index e8892a8ed..9d3e05b64 100644 --- a/docs/build/reference/customtask/cmem_plugin_parameters-ParametersPlugin.md +++ b/docs/build/reference/customtask/cmem_plugin_parameters-ParametersPlugin.md @@ -2,11 +2,13 @@ title: "Set or Overwrite parameters" description: "Connect this task to a config port of another task in order to set or overwrite the parameter values of this task." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Set or Overwrite parameters + !!! note inline end "Python Plugin" @@ -27,7 +29,7 @@ You can also use multiline values with `|` Example parameter configuration: -``` +```text url: http://example.org method: GET query: | diff --git a/docs/build/reference/customtask/cmem_plugin_pdf_extract-pdf_extract-PdfExtract.md b/docs/build/reference/customtask/cmem_plugin_pdf_extract-pdf_extract-PdfExtract.md index a5fd3405d..a740f51fe 100644 --- a/docs/build/reference/customtask/cmem_plugin_pdf_extract-pdf_extract-PdfExtract.md +++ b/docs/build/reference/customtask/cmem_plugin_pdf_extract-pdf_extract-PdfExtract.md @@ -2,11 +2,13 @@ title: "Extract from PDF files" description: "Extract text and tables from PDF files" icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Extract from PDF files + !!! note inline end "Python Plugin" @@ -24,7 +26,7 @@ The output is a JSON string on the path `pdf_extract_output`. The format depends ### Output one entity/value per file -``` +```text { "metadata": { "Filename": "sample.pdf", @@ -50,7 +52,7 @@ The output is a JSON string on the path `pdf_extract_output`. 
The format depends ### Output one entity/value for all files -``` +```text [ { "metadata": {"Filename": "file1.pdf", ...}, diff --git a/docs/build/reference/customtask/cmem_plugin_pgvector-Search.md b/docs/build/reference/customtask/cmem_plugin_pgvector-Search.md index 5221588da..eb5e39297 100644 --- a/docs/build/reference/customtask/cmem_plugin_pgvector-Search.md +++ b/docs/build/reference/customtask/cmem_plugin_pgvector-Search.md @@ -2,11 +2,13 @@ title: "Search Vector Embeddings" description: "Search for top-k metadata stored in Postgres Vector Store (PGVector)." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Search Vector Embeddings + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/cmem_plugin_pgvector-Store.md b/docs/build/reference/customtask/cmem_plugin_pgvector-Store.md index f870237c9..a4ad46d51 100644 --- a/docs/build/reference/customtask/cmem_plugin_pgvector-Store.md +++ b/docs/build/reference/customtask/cmem_plugin_pgvector-Store.md @@ -2,11 +2,13 @@ title: "Store Vector Embeddings" description: "Store embeddings into Postgres Vector Store (PGVector)." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Store Vector Embeddings + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/cmem_plugin_project_resources-List.md b/docs/build/reference/customtask/cmem_plugin_project_resources-List.md index dbf0214cd..aca9266d6 100644 --- a/docs/build/reference/customtask/cmem_plugin_project_resources-List.md +++ b/docs/build/reference/customtask/cmem_plugin_project_resources-List.md @@ -2,11 +2,13 @@ title: "List project files" description: "List file resources from the project." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # List project files + !!! 
note inline end "Python Plugin" @@ -32,7 +34,7 @@ The regular expression has to match the `fullPath` of the file and is case sensi Given this list of example files of a project: -``` +```text dataset.csv my-dataset.xml json/example.json diff --git a/docs/build/reference/customtask/cmem_plugin_project_resources-UploadLocalFiles.md b/docs/build/reference/customtask/cmem_plugin_project_resources-UploadLocalFiles.md index 8e4efe9ba..bf90163cf 100644 --- a/docs/build/reference/customtask/cmem_plugin_project_resources-UploadLocalFiles.md +++ b/docs/build/reference/customtask/cmem_plugin_project_resources-UploadLocalFiles.md @@ -2,11 +2,13 @@ title: "Upload local files" description: "Replace a file dataset resource with a local file or upload multiple local files to a project." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Upload local files + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/cmem_plugin_salesforce-SoqlQuery.md b/docs/build/reference/customtask/cmem_plugin_salesforce-SoqlQuery.md index 172227cd5..1ef5dd36c 100644 --- a/docs/build/reference/customtask/cmem_plugin_salesforce-SoqlQuery.md +++ b/docs/build/reference/customtask/cmem_plugin_salesforce-SoqlQuery.md @@ -2,11 +2,13 @@ title: "SOQL query (Salesforce)" description: "Executes a custom Salesforce Object Query (SOQL) to return sets of data your organization's Salesforce account." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # SOQL query (Salesforce) + !!! note inline end "Python Plugin" @@ -35,13 +37,13 @@ Examples: Retrieve all standard fields from all Lead resources. (without parser validation) -``` +```text SELECT FIELDS(STANDARD) FROM Lead ``` Retrieve first name and last name of all Contact resources. 
(with parser validation) -``` +```text SELECT Contact.Firstname, Contact.Lastname FROM Contact ``` diff --git a/docs/build/reference/customtask/cmem_plugin_salesforce-workflow-operations-SobjectCreate.md b/docs/build/reference/customtask/cmem_plugin_salesforce-workflow-operations-SobjectCreate.md index cced08940..60c1c8ba0 100644 --- a/docs/build/reference/customtask/cmem_plugin_salesforce-workflow-operations-SobjectCreate.md +++ b/docs/build/reference/customtask/cmem_plugin_salesforce-workflow-operations-SobjectCreate.md @@ -2,11 +2,13 @@ title: "Create/Update Salesforce Objects" description: "Manipulate data in your organization's Salesforce account." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Create/Update Salesforce Objects + !!! note inline end "Python Plugin" @@ -38,7 +40,7 @@ Example: - Your input SPARQL task looks like this. Note that the variables need to match the field strings from the Salesforce data model: -``` +```text SELECT DISTINCT FirstName, LastName, Email ... ``` diff --git a/docs/build/reference/customtask/cmem_plugin_shapes-plugin_shapes-ShapesPlugin.md b/docs/build/reference/customtask/cmem_plugin_shapes-plugin_shapes-ShapesPlugin.md index 94cbab250..824b9926e 100644 --- a/docs/build/reference/customtask/cmem_plugin_shapes-plugin_shapes-ShapesPlugin.md +++ b/docs/build/reference/customtask/cmem_plugin_shapes-plugin_shapes-ShapesPlugin.md @@ -2,11 +2,13 @@ title: "Generate SHACL shapes from data" description: "Generate SHACL node and property shapes from a data graph" icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Generate SHACL shapes from data + !!! note inline end "Python Plugin" @@ -55,7 +57,7 @@ option exposes your IP address to prefix.cc but no other data is shared. If unsu Provide the list of properties (as IRIs) for which you do not want to create property shapes. 
Example: -``` +```text http://www.w3.org/1999/02/22-rdf-syntax-ns#type http://xmlns.com/foaf/0.1/familyName ``` diff --git a/docs/build/reference/customtask/cmem_plugin_splitfile-plugin_splitfile-SplitFilePlugin.md b/docs/build/reference/customtask/cmem_plugin_splitfile-plugin_splitfile-SplitFilePlugin.md index 54affc1fb..c6ed21121 100644 --- a/docs/build/reference/customtask/cmem_plugin_splitfile-plugin_splitfile-SplitFilePlugin.md +++ b/docs/build/reference/customtask/cmem_plugin_splitfile-plugin_splitfile-SplitFilePlugin.md @@ -2,11 +2,13 @@ title: "Split file" description: "Split a file into multiple parts with a specified size." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Split file + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/cmem_plugin_ssh-Download.md b/docs/build/reference/customtask/cmem_plugin_ssh-Download.md index bddfb8845..9488c55b5 100644 --- a/docs/build/reference/customtask/cmem_plugin_ssh-Download.md +++ b/docs/build/reference/customtask/cmem_plugin_ssh-Download.md @@ -2,11 +2,13 @@ title: "Download SSH files" description: "Download files from a given SSH instance" icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Download SSH files + !!! note inline end "Python Plugin" @@ -24,14 +26,14 @@ You can also define a regular expression to include or exclude specific files. There is also an option to prevent files in subfolders from being included. -#### Authentication Methods +## Authentication Methods * **Password:** Only the password will be used for authentication. The private key field is ignored, even if filled. * **Key:** The private key will be used for authentication. If the key is encrypted, the password will be used to decrypt it. -#### Error handling modes +### Error handling modes * **Ignore:** Ignores the permission rights of files and lists downloads all files it has access to. 
Skips folders and files when there is no correct permission. diff --git a/docs/build/reference/customtask/cmem_plugin_ssh-Execute.md b/docs/build/reference/customtask/cmem_plugin_ssh-Execute.md index ca7eebb21..43b11326b 100644 --- a/docs/build/reference/customtask/cmem_plugin_ssh-Execute.md +++ b/docs/build/reference/customtask/cmem_plugin_ssh-Execute.md @@ -2,11 +2,13 @@ title: "Execute commands via SSH" description: "Execute commands on a given SSH instance." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Execute commands via SSH + !!! note inline end "Python Plugin" @@ -20,7 +22,7 @@ This workflow task executes commands on a given SSH instance. By providing the hostname, username, port and authentication method, you can specify the folder in which the command should be executed in. -#### Input Methods +## Input Methods * **No input:** The command will be executed with no input attached to the plugin. Stdin is non-existent in this case. @@ -28,7 +30,7 @@ is non-existent in this case. files that are connected via the input port of the plugin. This also allows for looping over multiple files executing the same command over them. -#### Output Methods +### Output Methods * **Structured process output:** The output will produce entities with its own schema including the stdout and stderr as well as the exit code to confirm the execution of the command. diff --git a/docs/build/reference/customtask/cmem_plugin_ssh-List.md b/docs/build/reference/customtask/cmem_plugin_ssh-List.md index a0e898c43..dc715df62 100644 --- a/docs/build/reference/customtask/cmem_plugin_ssh-List.md +++ b/docs/build/reference/customtask/cmem_plugin_ssh-List.md @@ -2,11 +2,13 @@ title: "List SSH files" description: "List files from a given SSH instance." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # List SSH files + !!! 
note inline end "Python Plugin" @@ -24,14 +26,14 @@ You can also define a regular expression to include or exclude specific files. There is also an option to prevent files in subfolders from being included. -#### Authentication Methods +## Authentication Methods * **Password:** Only the password will be used for authentication. The private key field is ignored, even if filled. * **Key:** The private key will be used for authentication. If the key is encrypted, the password will be used to decrypt it. -#### Error handling modes +### Error handling modes * **Ignore:** Ignores the permission rights of files and lists them all. Skips folders when there is no correct permission. diff --git a/docs/build/reference/customtask/cmem_plugin_ssh-Upload.md b/docs/build/reference/customtask/cmem_plugin_ssh-Upload.md index 8181dcb5a..e16cc9b84 100644 --- a/docs/build/reference/customtask/cmem_plugin_ssh-Upload.md +++ b/docs/build/reference/customtask/cmem_plugin_ssh-Upload.md @@ -2,11 +2,13 @@ title: "Upload SSH files" description: "Upload files to a given SSH instance." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Upload SSH files + !!! note inline end "Python Plugin" @@ -20,14 +22,14 @@ This workflow task uploads files to a given SSH instance. By providing the hostname, username, port and authentication method, you can specify the folder the data should be uploaded to. -#### Authentication Methods +## Authentication Methods * **Password:** Only the password will be used for authentication. The private key field is ignored, even if filled. * **Key:** The private key will be used for authentication. If the key is encrypted, the password will be used to decrypt it. -#### Note +### Note * If a connection cannot be established within 20 seconds, a timeout occurs. * Currently supported key types are: RSA, DSS, ECDSA, Ed25519. 
diff --git a/docs/build/reference/customtask/cmem_plugin_validation-validate-ValidateEntities.md b/docs/build/reference/customtask/cmem_plugin_validation-validate-ValidateEntities.md index 7c40a774e..7370aaaea 100644 --- a/docs/build/reference/customtask/cmem_plugin_validation-validate-ValidateEntities.md +++ b/docs/build/reference/customtask/cmem_plugin_validation-validate-ValidateEntities.md @@ -2,11 +2,13 @@ title: "Validate Entities" description: "Use a JSON schema to validate entities or a JSON dataset." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Validate Entities + !!! note inline end "Python Plugin" @@ -24,14 +26,14 @@ JSON Schema specification. The used JSON Schema needs to be provided as a JSON Dataset in the project. -### Input Modes +## Input Modes The plugin supports two input modes for validation: 1. **Validate Entities**: Validates entities received from the input port in the workflow. 2. **Validate JSON Dataset**: Validates a JSON dataset stored in the project. - - If the JSON dataset is a JSON array, the schema will validate each object inside the array. - - If the JSON dataset is a JSON object, it will be validated against the schema directly. + - If the JSON dataset is a JSON array, the schema will validate each object inside the array. + - If the JSON dataset is a JSON object, it will be validated against the schema directly. Validated data objects can be sent to an output port for further processing in the workflow or saved in a JSON dataset in the project. 
diff --git a/docs/build/reference/customtask/cmem_plugin_validation-validate-ValidateGraph.md b/docs/build/reference/customtask/cmem_plugin_validation-validate-ValidateGraph.md index b7664857c..7089e2718 100644 --- a/docs/build/reference/customtask/cmem_plugin_validation-validate-ValidateGraph.md +++ b/docs/build/reference/customtask/cmem_plugin_validation-validate-ValidateGraph.md @@ -2,11 +2,13 @@ title: "Validate Knowledge Graph" description: "Use SHACL shapes to validate resources in a Knowledge Graph." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Validate Knowledge Graph + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/cmem_plugin_wfreports_get_report.md b/docs/build/reference/customtask/cmem_plugin_wfreports_get_report.md index c8c6897d8..69459d672 100644 --- a/docs/build/reference/customtask/cmem_plugin_wfreports_get_report.md +++ b/docs/build/reference/customtask/cmem_plugin_wfreports_get_report.md @@ -2,11 +2,13 @@ title: "Get workflow report" description: "Output the last report of a workflow as a JSON file." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Get workflow report + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/cmem_plugin_yaml-parse.md b/docs/build/reference/customtask/cmem_plugin_yaml-parse.md index 6d6d81f1f..0c1f8680d 100644 --- a/docs/build/reference/customtask/cmem_plugin_yaml-parse.md +++ b/docs/build/reference/customtask/cmem_plugin_yaml-parse.md @@ -2,11 +2,13 @@ title: "Parse YAML" description: "Parses files, source code or input values as YAML documents." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Parse YAML + !!! 
note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/combine-csv.md b/docs/build/reference/customtask/combine-csv.md index 2c428c125..2102b4633 100644 --- a/docs/build/reference/customtask/combine-csv.md +++ b/docs/build/reference/customtask/combine-csv.md @@ -2,11 +2,13 @@ title: "Combine CSV files" description: "Combine CSV files with the same structure to one dataset." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # Combine CSV files + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/deleteProjectFiles.md b/docs/build/reference/customtask/deleteProjectFiles.md index dd8172137..060a12edc 100644 --- a/docs/build/reference/customtask/deleteProjectFiles.md +++ b/docs/build/reference/customtask/deleteProjectFiles.md @@ -2,10 +2,12 @@ title: "Delete project files" description: "Removes file resources from the project based on a regular expression." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Delete project files + Removes file resources from the project based on a regular expression (regex). @@ -14,7 +16,7 @@ The project-relative path of each file of the current project is tested against Given this list of example files of a project: -``` +```text dataset.csv my-dataset.xml json/example.json diff --git a/docs/build/reference/customtask/downloadFile.md b/docs/build/reference/customtask/downloadFile.md index b9c542c9e..dc5e13bad 100644 --- a/docs/build/reference/customtask/downloadFile.md +++ b/docs/build/reference/customtask/downloadFile.md @@ -2,10 +2,12 @@ title: "Download file" description: "Downloads a file from a given URL." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Download file + Downloads a file from a given URL. 
diff --git a/docs/build/reference/customtask/eccencaDataPlatformGraphStoreFileUploadOperator.md b/docs/build/reference/customtask/eccencaDataPlatformGraphStoreFileUploadOperator.md index 0cb4b7c41..30d86d195 100644 --- a/docs/build/reference/customtask/eccencaDataPlatformGraphStoreFileUploadOperator.md +++ b/docs/build/reference/customtask/eccencaDataPlatformGraphStoreFileUploadOperator.md @@ -2,10 +2,12 @@ title: "Upload File to Knowledge Graph" description: "Uploads an N-Triples or Turtle (limited support) file from the file repository to a 'Knowledge Graph' dataset. The output of this operatorcan be the input of datasets that support graph store file upload, e.g. 'Knowledge Graph'. The file will be uploaded to the graph specified in that dataset." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Upload File to Knowledge Graph + Uploads an N-Triples or Turtle (limited support) file from the file repository to a 'Knowledge Graph' dataset. The output of this operatorcan be the input of datasets that support graph store file upload, e.g. 'Knowledge Graph'. The file will be uploaded to the graph specified in that dataset. diff --git a/docs/build/reference/customtask/eccencaRestOperator.md b/docs/build/reference/customtask/eccencaRestOperator.md index 77a6a942d..8c24377e1 100644 --- a/docs/build/reference/customtask/eccencaRestOperator.md +++ b/docs/build/reference/customtask/eccencaRestOperator.md @@ -2,10 +2,12 @@ title: "Execute REST requests" description: "REST operator that fetches and optionally merges data from a REST endpoint. It supports executing multiple requests either via input entities that each overwrite config parameters or via paging. If you only need to download a single file, the 'Download file' operator might be the better option. Most features are currently only supported for JSON REST APIs. From multiple requests the REST operator can produce a merged JSON result, i.e. 
for JSON it will concatenate all results in a JSON array. Alternatively multiple results can be written directly to file (of a JSON dataset), either as a merged JSON file or one file per request inside a ZIP file. By default the output of this operator is an entity with a single property 'result', which is the (concatenated) JSON string." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Execute REST requests + ## Core parameter overview @@ -59,10 +61,11 @@ to fetch all results. This is currently only supported for JSON requests. In both cases the path to the next page value in the response JSON must be defined via the 'Next page JSON path' parameter. In case of the 'Next page identifier' paging method, also the parameter 'Next page ID query parameter' must be set. + - `Next page JSON path`: The property path in the result JSON where the 'next page' URL/value is provided. E.g. for following response structure, the value for this parameter would be `paging/next`: - ``` + ```text { ..., "paging": { @@ -79,11 +82,11 @@ to fetch all results. This is currently only supported for JSON requests. - `HTTP headers`: This parameter allows to set HTTP headers of the request being made. Each line of the multi-line value should contain a single header, e.g. - ``` + ```text Accept-Language: en-US,en;q=0.5 Cache-Control: max-age=0 ``` - + ## Sending a multipart HTTP file request If the content of a POST request should be sent as file content of a multipart HTTP request, instead of the request body, @@ -92,7 +95,7 @@ following parameter must be configured: - `Multi-part file parameter`: If set to a non-empty value then, instead of a normal POST request, a multipart/form-data file upload request will be executed. The value of this parameter is used as the form parameter name. - + ## Output options By default, the response body of a request is output as value of the 'result' property of a single output entity. 
@@ -110,7 +113,7 @@ Currently, the following datasets support the processing of ZIP files: JSON, XML If the option 'Read parameters from input' is enabled, it is currently always assumed that multiple requests will be sent. The responses must either be JSON, then the results are merged into a JSON array or the 'Output result as file' option must be enabled in order to write a merged JSON or ZIP file. - + ## Fine-tuning timeouts If requests can take a much longer time than what can usually be expected, it is possible to increase the timeouts to diff --git a/docs/build/reference/customtask/getProjectFiles.md b/docs/build/reference/customtask/getProjectFiles.md index 741a7f6eb..56d81f079 100644 --- a/docs/build/reference/customtask/getProjectFiles.md +++ b/docs/build/reference/customtask/getProjectFiles.md @@ -2,10 +2,12 @@ title: "Get project files" description: "Get file resources from the project." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Get project files + Get file resources from the project. diff --git a/docs/build/reference/customtask/index.md b/docs/build/reference/customtask/index.md index 8b99a7e5f..04c36c71f 100644 --- a/docs/build/reference/customtask/index.md +++ b/docs/build/reference/customtask/index.md @@ -5,7 +5,9 @@ tags: - Build - Reference --- + # Custom Workflow Tasks + A custom workflow task is an operator that can be used in a workflow. diff --git a/docs/build/reference/customtask/setParameters.md b/docs/build/reference/customtask/setParameters.md index 549105168..c01e1f32e 100644 --- a/docs/build/reference/customtask/setParameters.md +++ b/docs/build/reference/customtask/setParameters.md @@ -2,10 +2,12 @@ title: "Set parameters" description: "Set and overwrite parameters of a task." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Set parameters + Set and overwrite parameters of a task. 
diff --git a/docs/build/reference/customtask/shacl-pyshacl.md b/docs/build/reference/customtask/shacl-pyshacl.md index ad2c264a6..3fe8d5df2 100644 --- a/docs/build/reference/customtask/shacl-pyshacl.md +++ b/docs/build/reference/customtask/shacl-pyshacl.md @@ -2,11 +2,13 @@ title: "SHACL validation with pySHACL" description: "Performs SHACL validation with pySHACL." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask - PythonPlugin --- + # SHACL validation with pySHACL + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/customtask/sparqlCopyOperator.md b/docs/build/reference/customtask/sparqlCopyOperator.md index 92922ffda..28cc6d299 100644 --- a/docs/build/reference/customtask/sparqlCopyOperator.md +++ b/docs/build/reference/customtask/sparqlCopyOperator.md @@ -2,10 +2,12 @@ title: "SPARQL Construct query" description: "A task that executes a SPARQL Construct query on a SPARQL enabled data source and outputs the SPARQL result. If the result should be written to the same RDF store it is read from, the SPARQL Update operator is preferable." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # SPARQL Construct query + A task that executes a SPARQL Construct query on a SPARQL enabled data source and outputs the SPARQL result. If the result should be written to the same RDF store it is read from, the SPARQL Update operator is preferable. diff --git a/docs/build/reference/customtask/sparqlSelectOperator.md b/docs/build/reference/customtask/sparqlSelectOperator.md index 1d14427aa..1fa1372b8 100644 --- a/docs/build/reference/customtask/sparqlSelectOperator.md +++ b/docs/build/reference/customtask/sparqlSelectOperator.md @@ -2,10 +2,12 @@ title: "SPARQL Select query" description: "A task that executes a SPARQL Select query on a SPARQL enabled data source and outputs the SPARQL result. 
If the SPARQL source is defined on a specific graph, a FROM clause will be added to the query at execution time, except when there already exists a GRAPH or FROM clause in the query. FROM NAMED clauses are not injected." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # SPARQL Select query + The SPARQL SELECT plugin is a task for executing SPARQL SELECT queries on the input RDF data source. diff --git a/docs/build/reference/customtask/sparqlUpdateOperator.md b/docs/build/reference/customtask/sparqlUpdateOperator.md index e0ec0d1ca..63eb3ccf2 100644 --- a/docs/build/reference/customtask/sparqlUpdateOperator.md +++ b/docs/build/reference/customtask/sparqlUpdateOperator.md @@ -2,10 +2,12 @@ title: "SPARQL Update query" description: "A task that outputs SPARQL Update queries for every entity from the input based on a SPARQL Update template. The output of this operator should be connected to the SPARQL datasets to which the results should be written." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # SPARQL Update query + The SPARQL UPDATE query plugin is a task for outputting SPARQL UPDATE queries from the input RDF data source. @@ -29,7 +31,7 @@ the dollar sign (`$`), i.e. 
filling in input values via placeholders in the temp ### Example of the `Simple` mode -``` +```text DELETE DATA { ${} rdf:label ${"PROP_FROM_ENTITY_SCHEMA2"} } INSERT DATA { ${} rdf:label ${"PROP_FROM_ENTITY_SCHEMA3"} } ``` @@ -44,7 +46,7 @@ can combine variable substitutions with fixed expressions to construct semi-flex ### Example of the `Velocity Engine` mode -``` +```text DELETE DATA { $row.uri("PROP_FROM_ENTITY_SCHEMA1") rdf:label $row.plainLiteral("PROP_FROM_ENTITY_SCHEMA2") } #if ( $row.exists("PROP_FROM_ENTITY_SCHEMA1") ) INSERT DATA { $row.uri("PROP_FROM_ENTITY_SCHEMA1") rdf:label $row.plainLiteral("PROP_FROM_ENTITY_SCHEMA3") } diff --git a/docs/build/reference/customtask/sqlUpdateQueryOperator.md b/docs/build/reference/customtask/sqlUpdateQueryOperator.md index d449b3a11..02b18b3fe 100644 --- a/docs/build/reference/customtask/sqlUpdateQueryOperator.md +++ b/docs/build/reference/customtask/sqlUpdateQueryOperator.md @@ -2,10 +2,12 @@ title: "SQL Update query" description: "A task that outputs SQL queries. The output of this operator should be connected to a remote SQL endpoint on which queries should be executed." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # SQL Update query + A task that outputs SQL queries. diff --git a/docs/build/reference/customtask/tripleRequestOperator.md b/docs/build/reference/customtask/tripleRequestOperator.md index 4ca2d983b..56e89e6e7 100644 --- a/docs/build/reference/customtask/tripleRequestOperator.md +++ b/docs/build/reference/customtask/tripleRequestOperator.md @@ -2,10 +2,12 @@ title: "Request RDF triples" description: "A task that requests all triples from an RDF dataset." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Request RDF triples + A task that requests all triples from an RDF dataset. 
diff --git a/docs/build/reference/customtask/ucumNormalizationTask.md b/docs/build/reference/customtask/ucumNormalizationTask.md index dc24f6c42..c47a852bf 100644 --- a/docs/build/reference/customtask/ucumNormalizationTask.md +++ b/docs/build/reference/customtask/ucumNormalizationTask.md @@ -2,10 +2,12 @@ title: "Normalize units of measurement" description: "Custom task that will substitute numeric values and pertaining unit symbols with a SI-system-unit normalized representation." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Normalize units of measurement + This custom task substitutes numeric values and pertaining units by its normalized representation in the International System of Units (SI). @@ -89,7 +91,7 @@ While all SI units and decimal prefixes are supported by default, custom or obso # Denier , true , den , g/(9.km) # Grain , true , gr , (45.g)/100 # Pound , true , lb , (45359237.kg)/100000000 , # , lbm - + ``` ## Advanced Parameter diff --git a/docs/build/reference/customtask/validateXsdOperator.md b/docs/build/reference/customtask/validateXsdOperator.md index 0de908b80..1438d6d5c 100644 --- a/docs/build/reference/customtask/validateXsdOperator.md +++ b/docs/build/reference/customtask/validateXsdOperator.md @@ -2,10 +2,12 @@ title: "Validate XML" description: "Validates an XML dataset against a provided XML schema (XSD) file. Any errors are written to the output. Can be used in conjunction with the `Cancel Workflow` operator in order to stop the workflow if errors have been found." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # Validate XML + Validates an XML dataset against a provided XML schema (XSD) file. Any errors are written to the output. Can be used in conjunction with the `Cancel Workflow` operator in order to stop the workflow if errors have been found. 
diff --git a/docs/build/reference/customtask/xsltOperator.md b/docs/build/reference/customtask/xsltOperator.md index 69521813e..9dc9dd1af 100644 --- a/docs/build/reference/customtask/xsltOperator.md +++ b/docs/build/reference/customtask/xsltOperator.md @@ -2,10 +2,12 @@ title: "XSLT" description: "A task that converts an XML resource via an XSLT script and writes the transformed output into a file resource." icon: octicons/cross-reference-24 -tags: +tags: - WorkflowTask --- + # XSLT + ## Description of the plugin @@ -24,9 +26,9 @@ If you are well-versed in the XSL ecosystem, this is everything you need to know The acronym **XSL** stands for "eXtensible Stylesheet Language". XSL is not a single technology or specification, but a _family of languages_ for processing (transforming) and rendering (presenting) XML documents. It consists of three parts: - 1. XSLT: XSL Transformations - 2. XPath: XML Path Language - 3. XSL-FO: XSL Formatting Objects +1. XSLT: XSL Transformations +2. XPath: XML Path Language +3. XSL-FO: XSL Formatting Objects In a nutshell, this is simply the separation of concerns between "processing" XML and "rendering" the results. diff --git a/docs/build/reference/dataset/Hive.md b/docs/build/reference/dataset/Hive.md index 367bef107..a74aa48a6 100644 --- a/docs/build/reference/dataset/Hive.md +++ b/docs/build/reference/dataset/Hive.md @@ -2,10 +2,12 @@ title: "Hive database" description: "Read from or write to an embedded Apache Hive endpoint." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # Hive database + Read from or write to an embedded Apache Hive endpoint. diff --git a/docs/build/reference/dataset/Jdbc.md b/docs/build/reference/dataset/Jdbc.md index b85563bc6..48522e9d4 100644 --- a/docs/build/reference/dataset/Jdbc.md +++ b/docs/build/reference/dataset/Jdbc.md @@ -2,10 +2,12 @@ title: "Remote SQL endpoint" description: "Connect to an existing JDBC endpoint." 
icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # Remote SQL endpoint + ## General usage @@ -30,7 +32,7 @@ Please make sure that you use the correct syntax for each DBMS, otherwise you ma Here are templates for supported database systems: -``` +```text oracle (external driver needed): jdbc:oracle:thin:@{host}[:{port}]/{database} @@ -87,6 +89,7 @@ Supported DBMS: - only applies when appending data to an existing table and having `Force Spark Execution` disabled - Both the server parameter `local_infile` and the client parameter `allowLoadLocalInfile` must be enabled, e.g. by adding `allowLoadLocalInfile=true` to the JDBC URL. For MySQL starting with version 8 the `local_infile` parameter is by default disabled! + - If during writing to a MySQL/MariaDB a `[…] You have an error in your SQL syntax […]` error is encountered make sure ANSIquotes are used. `sql_mode=ANSI_QUOTES` can be set via a URL parameter to the JDBC connection string like: @@ -115,7 +118,7 @@ spark.sql.options { # jdbc:db2://host:port) is used to specify the driver. For each protocol on the list a jar classname and optional download # location can be provided. jdbc.drivers = "db2,mysql" - + # Some database systems use licenses that are to loose or restrictive for us to ship the drivers. Therefore a path # to a jar file containing the driver and the name of driver can be specified here. jdbc.db2.jar = "/home/user/Jars/db2jcc-db2jcc4.jar" diff --git a/docs/build/reference/dataset/LocalInternalDataset.md b/docs/build/reference/dataset/LocalInternalDataset.md index 3492705e9..9431dafd6 100644 --- a/docs/build/reference/dataset/LocalInternalDataset.md +++ b/docs/build/reference/dataset/LocalInternalDataset.md @@ -2,10 +2,12 @@ title: "Internal dataset (single graph)" description: "Dataset for storing entities between workflow steps. This variant does use the same graph for all internal datasets in a workflow. 
The underlying dataset type can be configured using the `dataset.internal.*` configuration parameters." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # Internal dataset (single graph) + Dataset for storing entities between workflow steps. This variant does use the same graph for all internal datasets in a workflow. The underlying dataset type can be configured using the `dataset.internal.*` configuration parameters. diff --git a/docs/build/reference/dataset/SnowflakeJdbc.md b/docs/build/reference/dataset/SnowflakeJdbc.md index 1fe8a1f74..e491e318a 100644 --- a/docs/build/reference/dataset/SnowflakeJdbc.md +++ b/docs/build/reference/dataset/SnowflakeJdbc.md @@ -2,10 +2,12 @@ title: "Snowflake SQL endpoint" description: "Connect to Snowflake JDBC endpoint." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # Snowflake SQL endpoint + This dataset supports connections to the Snowflake JDBC endpoint. diff --git a/docs/build/reference/dataset/alignment.md b/docs/build/reference/dataset/alignment.md index 4511d448e..8418723e7 100644 --- a/docs/build/reference/dataset/alignment.md +++ b/docs/build/reference/dataset/alignment.md @@ -2,10 +2,12 @@ title: "Alignment" description: "Writes the alignment format specified at http://alignapi.gforge.inria.fr/format.html." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # Alignment + Writes the alignment format specified at . diff --git a/docs/build/reference/dataset/avro.md b/docs/build/reference/dataset/avro.md index 762aae031..34de8525c 100644 --- a/docs/build/reference/dataset/avro.md +++ b/docs/build/reference/dataset/avro.md @@ -2,10 +2,12 @@ title: "Avro" description: "Read from or write to an Apache Avro file." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # Avro + Read from or write to an Apache Avro file. 
diff --git a/docs/build/reference/dataset/binaryFile.md b/docs/build/reference/dataset/binaryFile.md index 553f4065d..f3ae28b97 100644 --- a/docs/build/reference/dataset/binaryFile.md +++ b/docs/build/reference/dataset/binaryFile.md @@ -2,10 +2,12 @@ title: "Binary file" description: "Reads and writes binary files. A typical use-case for this dataset is to process PDF documents or images." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # Binary file + Reads and writes binary files. A typical use-case for this dataset is to process PDF documents or images using workflow operators that accept or output files. If an operator reads from this dataset that does not support files directly (such as transformation or linking tasks), it will only receive the file metadata, which includes the file path. diff --git a/docs/build/reference/dataset/csv.md b/docs/build/reference/dataset/csv.md index 5e5d558d1..c1a7962cb 100644 --- a/docs/build/reference/dataset/csv.md +++ b/docs/build/reference/dataset/csv.md @@ -2,10 +2,12 @@ title: "CSV" description: "Read from or write to an CSV file." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # CSV + Read from or write to an CSV file. diff --git a/docs/build/reference/dataset/eccencaDataPlatform.md b/docs/build/reference/dataset/eccencaDataPlatform.md index aece1073e..05748ba89 100644 --- a/docs/build/reference/dataset/eccencaDataPlatform.md +++ b/docs/build/reference/dataset/eccencaDataPlatform.md @@ -2,10 +2,12 @@ title: "Knowledge Graph" description: "Read RDF from or write RDF to a Knowledge Graph embedded in Corporate Memory." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # Knowledge Graph + The Knowledge Graph plugin is a dataset for reading and writing RDF to a knowledge graph embedded in Corporate Memory. 
diff --git a/docs/build/reference/dataset/excel.md b/docs/build/reference/dataset/excel.md index 21376adbb..efa79f51d 100644 --- a/docs/build/reference/dataset/excel.md +++ b/docs/build/reference/dataset/excel.md @@ -2,10 +2,12 @@ title: "Excel" description: "Read from or write to an Excel workbook in Open XML format (XLSX). The sheet is selected by specifying it as type in the subsequent workflow operator." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # Excel + Read from or write to an Excel workbook in Open XML format (XLSX). The sheet is selected by specifying it as type in the subsequent workflow operator. diff --git a/docs/build/reference/dataset/file.md b/docs/build/reference/dataset/file.md index 9cb33cc7a..1b8127997 100644 --- a/docs/build/reference/dataset/file.md +++ b/docs/build/reference/dataset/file.md @@ -2,10 +2,12 @@ title: "RDF file" description: "Dataset which retrieves and writes all entities from/to an RDF file. For reading, the dataset is loaded in-memory and thus the size is restricted by the available memory. Large datasets should be loaded into an external RDF store and retrieved using the SPARQL dataset instead." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # RDF file + Dataset which retrieves and writes all entities from/to an RDF file. For reading, the dataset is loaded in-memory and thus the size is restricted by the available memory. Large datasets should be loaded into an external RDF store and retrieved using the SPARQL dataset instead. diff --git a/docs/build/reference/dataset/googlespreadsheet.md b/docs/build/reference/dataset/googlespreadsheet.md index caa172a35..d5593c435 100644 --- a/docs/build/reference/dataset/googlespreadsheet.md +++ b/docs/build/reference/dataset/googlespreadsheet.md @@ -2,10 +2,12 @@ title: "Excel (Google Drive)" description: "Read data from a remote Google Spreadsheet." 
icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # Excel (Google Drive) + The dataset needs the document id of a "share via url" sheet on Google Drive as input. @@ -13,7 +15,7 @@ It will automatically correct the URL and add the "export as xlsx" option to a n that will be used to download an Excel Spreadsheet. The download will be cached and treated the same way as an xlsx file in the Excel Dataset. -### Caching +## Caching The advanced parameter `invalidateCacheAfter` allows the user to specify a duration of the file cache after which it is refreshed. diff --git a/docs/build/reference/dataset/inMemory.md b/docs/build/reference/dataset/inMemory.md index 6285316e3..c026ce84f 100644 --- a/docs/build/reference/dataset/inMemory.md +++ b/docs/build/reference/dataset/inMemory.md @@ -2,10 +2,12 @@ title: "In-memory dataset" description: "A Dataset that holds all data in-memory." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # In-memory dataset + A Dataset that holds all data in-memory. diff --git a/docs/build/reference/dataset/index.md b/docs/build/reference/dataset/index.md index 12daf45c0..8e8cd9afa 100644 --- a/docs/build/reference/dataset/index.md +++ b/docs/build/reference/dataset/index.md @@ -5,7 +5,9 @@ tags: - Build - Reference --- + # Datasets + Datasets are collections of data that can be read or written. diff --git a/docs/build/reference/dataset/internal.md b/docs/build/reference/dataset/internal.md index 561b36209..07b449b09 100644 --- a/docs/build/reference/dataset/internal.md +++ b/docs/build/reference/dataset/internal.md @@ -2,10 +2,12 @@ title: "Internal dataset" description: "Dataset for storing entities between workflow steps. The underlying dataset type can be configured using the `dataset.internal.*` configuration parameters." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # Internal dataset + Dataset for storing entities between workflow steps. 
The underlying dataset type can be configured using the `dataset.internal.*` configuration parameters. diff --git a/docs/build/reference/dataset/json.md b/docs/build/reference/dataset/json.md index 4cf38405f..693bf02ec 100644 --- a/docs/build/reference/dataset/json.md +++ b/docs/build/reference/dataset/json.md @@ -2,10 +2,12 @@ title: "JSON" description: "Read from or write to a JSON or JSON Lines file." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # JSON + Typically, this dataset is used to transform an JSON file to another format, e.g., to RDF. diff --git a/docs/build/reference/dataset/multiCsv.md b/docs/build/reference/dataset/multiCsv.md index 915e0c0f1..c1a299352 100644 --- a/docs/build/reference/dataset/multiCsv.md +++ b/docs/build/reference/dataset/multiCsv.md @@ -2,10 +2,12 @@ title: "Multi CSV ZIP" description: "Reads from or writes to multiple CSV files from/to a single ZIP file." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # Multi CSV ZIP + Reads from or writes to multiple CSV files from/to a single ZIP file. diff --git a/docs/build/reference/dataset/neo4j.md b/docs/build/reference/dataset/neo4j.md index dbab4b8fd..0289e5ad5 100644 --- a/docs/build/reference/dataset/neo4j.md +++ b/docs/build/reference/dataset/neo4j.md @@ -2,17 +2,19 @@ title: "Neo4j" description: "Neo4j graph" icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # Neo4j + Supports reading and writing Neo4j graphs. The following sections outline how graphs are generated and read back. For more information about Neo4j, please refer to the [Neo4j documentation](https://neo4j.com/docs/). -### Nodes +## Nodes For each entity that is written to a Neo4j dataset, a _node_ will be created. A property `uri` will be added to each generated node, which holds the URI of the original entity. 
diff --git a/docs/build/reference/dataset/office365preadsheet.md b/docs/build/reference/dataset/office365preadsheet.md index 8b8f49fbe..c86412daf 100644 --- a/docs/build/reference/dataset/office365preadsheet.md +++ b/docs/build/reference/dataset/office365preadsheet.md @@ -2,10 +2,12 @@ title: "Excel (OneDrive, Office365)" description: "Read data from a remote onedrive or Office365 Spreadsheet." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # Excel (OneDrive, Office365) + The dataset needs the URL of a "share via link" sheet on Office 365/OneDrive as input. diff --git a/docs/build/reference/dataset/orc.md b/docs/build/reference/dataset/orc.md index 26dd08857..8c61151e4 100644 --- a/docs/build/reference/dataset/orc.md +++ b/docs/build/reference/dataset/orc.md @@ -2,10 +2,12 @@ title: "ORC" description: "Read from or write to an Apache ORC file." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # ORC + Read from or write to an Apache ORC file. diff --git a/docs/build/reference/dataset/parquet.md b/docs/build/reference/dataset/parquet.md index 1dc31a48e..9f1e0011d 100644 --- a/docs/build/reference/dataset/parquet.md +++ b/docs/build/reference/dataset/parquet.md @@ -2,10 +2,12 @@ title: "Parquet" description: "Read from or write to an Apache Parquet file." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # Parquet + Read from or write to an Apache Parquet file. diff --git a/docs/build/reference/dataset/sparkView.md b/docs/build/reference/dataset/sparkView.md index d1e1f1810..bdd441bcf 100644 --- a/docs/build/reference/dataset/sparkView.md +++ b/docs/build/reference/dataset/sparkView.md @@ -2,10 +2,12 @@ title: "Embedded Spark SQL view" description: "Deprecated: Use the embedded SQL endpoint dataset instead." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # Embedded Spark SQL view + Deprecated: Use the embedded SQL endpoint dataset instead. 
diff --git a/docs/build/reference/dataset/sparqlEndpoint.md b/docs/build/reference/dataset/sparqlEndpoint.md index da7d57264..a7573e735 100644 --- a/docs/build/reference/dataset/sparqlEndpoint.md +++ b/docs/build/reference/dataset/sparqlEndpoint.md @@ -2,10 +2,12 @@ title: "SPARQL endpoint" description: "Connects to an existing SPARQL endpoint." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # SPARQL endpoint + The SPARQL endpoint plugin is a dataset for connecting to an existing, remote SPARQL endpoint. diff --git a/docs/build/reference/dataset/sqlEndpoint.md b/docs/build/reference/dataset/sqlEndpoint.md index c35074766..1346eb4f2 100644 --- a/docs/build/reference/dataset/sqlEndpoint.md +++ b/docs/build/reference/dataset/sqlEndpoint.md @@ -2,10 +2,12 @@ title: "Embedded SQL endpoint" description: "Provides a JDBC endpoint that exposes workflow or transformation results as tables, which can be queried using SQL." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # Embedded SQL endpoint + _SQL endpoint dataset parameters_ @@ -20,6 +22,7 @@ handle typed arrays and may make working with software like Excel easier. The parameter _aliasMap_ of the endpoint allows the specification of column aliases. The map is a comma separated list of key-value pairs. Each key and value is denoted by ```key:value```. An example for renaming 2 columns (source1, source2 to target1, target2) in the result would be: + ```source1:target1,source2:target2``` Note: Table and column (mapping target) names will be automatically converted to be valid in as many databases as possible. diff --git a/docs/build/reference/dataset/text.md b/docs/build/reference/dataset/text.md index 98ea4a24f..ce76a3589 100644 --- a/docs/build/reference/dataset/text.md +++ b/docs/build/reference/dataset/text.md @@ -2,10 +2,12 @@ title: "Text" description: "Reads and writes plain text files." 
icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # Text + Reads and writes plain text files. diff --git a/docs/build/reference/dataset/xml.md b/docs/build/reference/dataset/xml.md index d1636cd6e..7d1867e36 100644 --- a/docs/build/reference/dataset/xml.md +++ b/docs/build/reference/dataset/xml.md @@ -2,10 +2,12 @@ title: "XML" description: "Read from or write to an XML file." icon: octicons/cross-reference-24 -tags: +tags: - Dataset --- + # XML + Typically, this dataset is used to transform an XML file to another format, e.g., to RDF. It can also be used to generate XML files. diff --git a/docs/build/reference/distancemeasure/PhysicalQuantitiesDistance.md b/docs/build/reference/distancemeasure/PhysicalQuantitiesDistance.md index 84f11af1f..8658c6b99 100644 --- a/docs/build/reference/distancemeasure/PhysicalQuantitiesDistance.md +++ b/docs/build/reference/distancemeasure/PhysicalQuantitiesDistance.md @@ -2,10 +2,12 @@ title: "Compare physical quantities" description: "Computes the distance between two physical quantities." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Compare physical quantities + Computes the distance between two physical quantities. diff --git a/docs/build/reference/distancemeasure/cjkReadingDistance.md b/docs/build/reference/distancemeasure/cjkReadingDistance.md index c6bb8b8e9..4bc9e1cf0 100644 --- a/docs/build/reference/distancemeasure/cjkReadingDistance.md +++ b/docs/build/reference/distancemeasure/cjkReadingDistance.md @@ -2,10 +2,12 @@ title: "CJK reading distance" description: "CJK Reading Distance." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # CJK reading distance + CJK Reading Distance. 
diff --git a/docs/build/reference/distancemeasure/constantDistance.md b/docs/build/reference/distancemeasure/constantDistance.md index 0e3a7d55d..a0482f21e 100644 --- a/docs/build/reference/distancemeasure/constantDistance.md +++ b/docs/build/reference/distancemeasure/constantDistance.md @@ -2,10 +2,12 @@ title: "Constant similarity value" description: "Always returns a constant similarity value." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Constant similarity value + Always returns a constant similarity value. diff --git a/docs/build/reference/distancemeasure/cosine.md b/docs/build/reference/distancemeasure/cosine.md index f6fa1d447..c9a645c3b 100644 --- a/docs/build/reference/distancemeasure/cosine.md +++ b/docs/build/reference/distancemeasure/cosine.md @@ -2,10 +2,12 @@ title: "Cosine" description: "Cosine Distance Measure." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Cosine + Cosine Distance Measure. diff --git a/docs/build/reference/distancemeasure/date.md b/docs/build/reference/distancemeasure/date.md index 5f638e95f..aaca1fe3b 100644 --- a/docs/build/reference/distancemeasure/date.md +++ b/docs/build/reference/distancemeasure/date.md @@ -2,10 +2,12 @@ title: "Date" description: "The distance in days between two dates ('YYYY-MM-DD' format)." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Date + The distance in days between two dates ('YYYY-MM-DD' format). diff --git a/docs/build/reference/distancemeasure/dateTime.md b/docs/build/reference/distancemeasure/dateTime.md index c3530dee0..aaa4ef17f 100644 --- a/docs/build/reference/distancemeasure/dateTime.md +++ b/docs/build/reference/distancemeasure/dateTime.md @@ -2,10 +2,12 @@ title: "DateTime" description: "Distance between two date time values (xsd:dateTime format) in seconds." 
icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # DateTime + Distance between two date time values (xsd:dateTime format) in seconds. diff --git a/docs/build/reference/distancemeasure/dice.md b/docs/build/reference/distancemeasure/dice.md index 9bc7879aa..cdd71b3d4 100644 --- a/docs/build/reference/distancemeasure/dice.md +++ b/docs/build/reference/distancemeasure/dice.md @@ -2,10 +2,12 @@ title: "Dice coefficient" description: "Dice similarity coefficient." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Dice coefficient + Dice similarity coefficient. diff --git a/docs/build/reference/distancemeasure/equality.md b/docs/build/reference/distancemeasure/equality.md index 4e12096fc..d7d92641a 100644 --- a/docs/build/reference/distancemeasure/equality.md +++ b/docs/build/reference/distancemeasure/equality.md @@ -2,10 +2,12 @@ title: "String equality" description: "Checks for equality of the string representation of the given values. Returns success if string values are equal, failure otherwise. For a numeric comparison of values use the 'Numeric Equality' comparator." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # String equality + Checks for equality of the string representation of the given values. Returns success if string values are equal, failure otherwise. For a numeric comparison of values use the 'Numeric Equality' comparator. diff --git a/docs/build/reference/distancemeasure/greaterThan.md b/docs/build/reference/distancemeasure/greaterThan.md index 7dbc9301b..af5755ed0 100644 --- a/docs/build/reference/distancemeasure/greaterThan.md +++ b/docs/build/reference/distancemeasure/greaterThan.md @@ -2,10 +2,12 @@ title: "Greater than" description: "Checks if the source value is greater than the target value. If both strings are numbers, numerical order is used for comparison. Otherwise, alphanumerical order is used." 
icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Greater than + Checks if the source value is greater than the target value. If both strings are numbers, numerical order is used for comparison. Otherwise, alphanumerical order is used. diff --git a/docs/build/reference/distancemeasure/index.md b/docs/build/reference/distancemeasure/index.md index 4ddf1d116..95ddb0e41 100644 --- a/docs/build/reference/distancemeasure/index.md +++ b/docs/build/reference/distancemeasure/index.md @@ -5,7 +5,9 @@ tags: - Build - Reference --- + # Distance Measures + Distance Measures compute a distance metric between two sets of strings. diff --git a/docs/build/reference/distancemeasure/inequality.md b/docs/build/reference/distancemeasure/inequality.md index 99a305aa7..b320c3fe0 100644 --- a/docs/build/reference/distancemeasure/inequality.md +++ b/docs/build/reference/distancemeasure/inequality.md @@ -2,10 +2,12 @@ title: "Inequality" description: "Returns success if values are not equal, failure otherwise." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Inequality + Returns success if values are not equal, failure otherwise. diff --git a/docs/build/reference/distancemeasure/insideNumericInterval.md b/docs/build/reference/distancemeasure/insideNumericInterval.md index 99f49114f..9376825cf 100644 --- a/docs/build/reference/distancemeasure/insideNumericInterval.md +++ b/docs/build/reference/distancemeasure/insideNumericInterval.md @@ -2,10 +2,12 @@ title: "Inside numeric interval" description: "Checks if a number is contained inside a numeric interval, such as '1900 - 2000'." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Inside numeric interval + Checks if a number is contained inside a numeric interval, such as '1900 - 2000'. 
diff --git a/docs/build/reference/distancemeasure/isSubstring.md b/docs/build/reference/distancemeasure/isSubstring.md index 3383cd6c7..aada63a37 100644 --- a/docs/build/reference/distancemeasure/isSubstring.md +++ b/docs/build/reference/distancemeasure/isSubstring.md @@ -2,10 +2,12 @@ title: "Is substring" description: "Checks if a source value is a substring of a target value." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Is substring + Checks if a source value is a substring of a target value. diff --git a/docs/build/reference/distancemeasure/jaccard.md b/docs/build/reference/distancemeasure/jaccard.md index 940910c19..c2261da25 100644 --- a/docs/build/reference/distancemeasure/jaccard.md +++ b/docs/build/reference/distancemeasure/jaccard.md @@ -2,10 +2,12 @@ title: "Jaccard" description: "Jaccard similarity coefficient. Divides the matching tokens by the number of distinct tokens from both inputs." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Jaccard + Jaccard similarity coefficient. Divides the matching tokens by the number of distinct tokens from both inputs. diff --git a/docs/build/reference/distancemeasure/jaro.md b/docs/build/reference/distancemeasure/jaro.md index 31faf8587..ddbfd6dec 100644 --- a/docs/build/reference/distancemeasure/jaro.md +++ b/docs/build/reference/distancemeasure/jaro.md @@ -2,10 +2,12 @@ title: "Jaro distance" description: "Matches strings based on the Jaro distance metric." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Jaro distance + The Jaro distance measure calculates the similarity between two strings based on the number and order of common characters, the number of transpositions, and the length of the strings. The Jaro distance is 0 for a perfect match and 1 if there is no similarity between the given strings. 
diff --git a/docs/build/reference/distancemeasure/jaroWinkler.md b/docs/build/reference/distancemeasure/jaroWinkler.md index d62628766..dcb17cd1d 100644 --- a/docs/build/reference/distancemeasure/jaroWinkler.md +++ b/docs/build/reference/distancemeasure/jaroWinkler.md @@ -2,10 +2,12 @@ title: "Jaro-Winkler distance" description: "Matches strings based on the Jaro-Winkler distance measure." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Jaro-Winkler distance + The Jaro-Winkler distance measure is a variation of the Jaro distance metric. It takes into account the prefixes of the strings being compared and assigns higher weights to matching prefixes. diff --git a/docs/build/reference/distancemeasure/koreanPhonemeDistance.md b/docs/build/reference/distancemeasure/koreanPhonemeDistance.md index 4e8e5d887..f2ac5f8df 100644 --- a/docs/build/reference/distancemeasure/koreanPhonemeDistance.md +++ b/docs/build/reference/distancemeasure/koreanPhonemeDistance.md @@ -2,10 +2,12 @@ title: "Korean phoneme distance" description: "Korean phoneme distance." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Korean phoneme distance + Korean phoneme distance. diff --git a/docs/build/reference/distancemeasure/koreanTranslitDistance.md b/docs/build/reference/distancemeasure/koreanTranslitDistance.md index 23f574860..cdc327685 100644 --- a/docs/build/reference/distancemeasure/koreanTranslitDistance.md +++ b/docs/build/reference/distancemeasure/koreanTranslitDistance.md @@ -2,10 +2,12 @@ title: "Korean translit distance" description: "Transliterated Korean distance." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Korean translit distance + Transliterated Korean distance. 
diff --git a/docs/build/reference/distancemeasure/levenshtein.md b/docs/build/reference/distancemeasure/levenshtein.md index e33687daa..48a1dcdf8 100644 --- a/docs/build/reference/distancemeasure/levenshtein.md +++ b/docs/build/reference/distancemeasure/levenshtein.md @@ -2,10 +2,12 @@ title: "Normalized Levenshtein distance" description: "Normalized Levenshtein distance. Divides the edit distance by the length of the longer string." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Normalized Levenshtein distance + Normalized Levenshtein distance. Divides the edit distance by the length of the longer string. diff --git a/docs/build/reference/distancemeasure/levenshteinDistance.md b/docs/build/reference/distancemeasure/levenshteinDistance.md index 97dc0b389..908d1d085 100644 --- a/docs/build/reference/distancemeasure/levenshteinDistance.md +++ b/docs/build/reference/distancemeasure/levenshteinDistance.md @@ -2,10 +2,12 @@ title: "Levenshtein distance" description: "Levenshtein distance. Returns a distance value between zero and the size of the string." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Levenshtein distance + Levenshtein distance. Returns a distance value between zero and the size of the string. diff --git a/docs/build/reference/distancemeasure/lowerThan.md b/docs/build/reference/distancemeasure/lowerThan.md index 3633149d0..aace98021 100644 --- a/docs/build/reference/distancemeasure/lowerThan.md +++ b/docs/build/reference/distancemeasure/lowerThan.md @@ -2,10 +2,12 @@ title: "Lower than" description: "Checks if the source value is lower than the target value." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Lower than + Checks if the source value is lower than the target value. 
diff --git a/docs/build/reference/distancemeasure/num.md b/docs/build/reference/distancemeasure/num.md index bae92bb7e..174ad2465 100644 --- a/docs/build/reference/distancemeasure/num.md +++ b/docs/build/reference/distancemeasure/num.md @@ -2,10 +2,12 @@ title: "Numeric similarity" description: "Computes the numeric distance between two numbers." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Numeric similarity + Computes the numeric distance between two numbers. diff --git a/docs/build/reference/distancemeasure/numericEquality.md b/docs/build/reference/distancemeasure/numericEquality.md index c667a72a1..080569091 100644 --- a/docs/build/reference/distancemeasure/numericEquality.md +++ b/docs/build/reference/distancemeasure/numericEquality.md @@ -2,10 +2,12 @@ title: "Numeric equality" description: "Compares values numerically instead of their string representation as the 'String Equality' operator does. Allows to set the needed precision of the comparison. A value of 0.0 means that the values must represent exactly the same (floating point) value, values higher than that allow for a margin of tolerance." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Numeric equality + Compares values numerically instead of their string representation as the 'String Equality' operator does. diff --git a/docs/build/reference/distancemeasure/qGrams.md b/docs/build/reference/distancemeasure/qGrams.md index 4998c64a9..6de3a83a4 100644 --- a/docs/build/reference/distancemeasure/qGrams.md +++ b/docs/build/reference/distancemeasure/qGrams.md @@ -2,10 +2,12 @@ title: "qGrams" description: "String similarity based on q-grams (by default q=2)." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # qGrams + String similarity based on q-grams (by default q=2). 
diff --git a/docs/build/reference/distancemeasure/relaxedEquality.md b/docs/build/reference/distancemeasure/relaxedEquality.md index 65510d05c..8f69328f1 100644 --- a/docs/build/reference/distancemeasure/relaxedEquality.md +++ b/docs/build/reference/distancemeasure/relaxedEquality.md @@ -2,10 +2,12 @@ title: "Relaxed equality" description: "Return success if strings are equal, failure otherwise. Lower/upper case and differences like ö/o, n/ñ, c/ç etc. are treated as equal." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Relaxed equality + Return success if strings are equal, failure otherwise. Lower/upper case and differences like ö/o, n/ñ, c/ç etc. are treated as equal. diff --git a/docs/build/reference/distancemeasure/softjaccard.md b/docs/build/reference/distancemeasure/softjaccard.md index 6e32f53ca..96aa1e80e 100644 --- a/docs/build/reference/distancemeasure/softjaccard.md +++ b/docs/build/reference/distancemeasure/softjaccard.md @@ -2,10 +2,12 @@ title: "Soft Jaccard" description: "Soft Jaccard similarity coefficient. Same as Jaccard distance but values within an levenhstein distance of 'maxDistance' are considered equivalent." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Soft Jaccard + Soft Jaccard similarity coefficient. Same as Jaccard distance but values within an levenhstein distance of 'maxDistance' are considered equivalent. diff --git a/docs/build/reference/distancemeasure/startsWith.md b/docs/build/reference/distancemeasure/startsWith.md index bafb6777f..e002ed6d0 100644 --- a/docs/build/reference/distancemeasure/startsWith.md +++ b/docs/build/reference/distancemeasure/startsWith.md @@ -2,10 +2,12 @@ title: "Starts with" description: "Returns success if the first string starts with the second string, failure otherwise." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Starts with + Returns success if the first string starts with the second string, failure otherwise. 
diff --git a/docs/build/reference/distancemeasure/substringDistance.md b/docs/build/reference/distancemeasure/substringDistance.md index 11d5105aa..a3dbf9b02 100644 --- a/docs/build/reference/distancemeasure/substringDistance.md +++ b/docs/build/reference/distancemeasure/substringDistance.md @@ -2,10 +2,12 @@ title: "Substring comparison" description: "Return 0 to 1 for strong similarity to weak similarity. Based on the paper: Stoilos, Giorgos, Giorgos Stamou, and Stefanos Kollias. 'A string metric for ontology alignment.' The Semantic Web-ISWC 2005. Springer Berlin Heidelberg, 2005. 624-637." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Substring comparison + Return 0 to 1 for strong similarity to weak similarity. Based on the paper: Stoilos, Giorgos, Giorgos Stamou, and Stefanos Kollias. "A string metric for ontology alignment." The Semantic Web-ISWC 2005. Springer Berlin Heidelberg, 2005. 624-637. diff --git a/docs/build/reference/distancemeasure/tokenwiseDistance.md b/docs/build/reference/distancemeasure/tokenwiseDistance.md index 34976d55f..aed4e8f5f 100644 --- a/docs/build/reference/distancemeasure/tokenwiseDistance.md +++ b/docs/build/reference/distancemeasure/tokenwiseDistance.md @@ -2,10 +2,12 @@ title: "Token-wise distance" description: "Token-wise string distance using the specified metric." icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Token-wise distance + Token-wise string distance using the specified metric. diff --git a/docs/build/reference/distancemeasure/wgs84.md b/docs/build/reference/distancemeasure/wgs84.md index 64296411c..8feb4cf67 100644 --- a/docs/build/reference/distancemeasure/wgs84.md +++ b/docs/build/reference/distancemeasure/wgs84.md @@ -2,10 +2,12 @@ title: "Geographical distance" description: "Computes the geographical distance between two points. 
Author: Konrad Höffner (MOLE subgroup of Research Group AKSW, University of Leipzig)" icon: octicons/cross-reference-24 -tags: +tags: - DistanceMeasure --- + # Geographical distance + Computes the geographical distance between two points. Author: Konrad Höffner (MOLE subgroup of Research Group AKSW, University of Leipzig) diff --git a/docs/build/reference/index.md b/docs/build/reference/index.md index 03ec5cb90..98ecf5b8d 100644 --- a/docs/build/reference/index.md +++ b/docs/build/reference/index.md @@ -5,7 +5,9 @@ tags: - Build - Reference --- + # Task and Operator Reference + **:octicons-people-24: Intended audience:** Linked Data Experts and Domain Experts diff --git a/docs/build/reference/transformer/Combine/concat.md b/docs/build/reference/transformer/Combine/concat.md index 6287f4d79..a487da3bf 100644 --- a/docs/build/reference/transformer/Combine/concat.md +++ b/docs/build/reference/transformer/Combine/concat.md @@ -2,10 +2,12 @@ title: "Concatenate" description: "Concatenates strings from multiple inputs." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Concatenate + Concatenates strings from multiple inputs. @@ -112,7 +114,7 @@ Concatenates strings from multiple inputs. * Returns: - ``` + ```text [First Second] ``` diff --git a/docs/build/reference/transformer/Combine/concatMultiValues.md b/docs/build/reference/transformer/Combine/concatMultiValues.md index 527583536..ac70b442e 100644 --- a/docs/build/reference/transformer/Combine/concatMultiValues.md +++ b/docs/build/reference/transformer/Combine/concatMultiValues.md @@ -2,10 +2,12 @@ title: "Concatenate multiple values" description: "Concatenates multiple values received for an input. If applied to multiple inputs, yields at most one value per input. Optionally removes duplicate values." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Concatenate multiple values + Concatenates multiple values received for an input. 
If applied to multiple inputs, yields at most one value per input. Optionally removes duplicate values. @@ -64,14 +66,14 @@ Concatenates multiple values received for an input. If applied to multiple input * Input values: 1. - ``` + ```text [a \b, c] ``` * Returns: - ``` + ```text [a \b \c] diff --git a/docs/build/reference/transformer/Combine/concatPairwise.md b/docs/build/reference/transformer/Combine/concatPairwise.md index fa98976e2..6b03af3aa 100644 --- a/docs/build/reference/transformer/Combine/concatPairwise.md +++ b/docs/build/reference/transformer/Combine/concatPairwise.md @@ -2,10 +2,12 @@ title: "Concatenate pairwise" description: "Concatenates the values of multiple inputs pairwise." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Concatenate pairwise + Concatenates the values of multiple inputs pairwise. diff --git a/docs/build/reference/transformer/Combine/merge.md b/docs/build/reference/transformer/Combine/merge.md index 4bcd5cefe..44adb6872 100644 --- a/docs/build/reference/transformer/Combine/merge.md +++ b/docs/build/reference/transformer/Combine/merge.md @@ -2,10 +2,12 @@ title: "Merge" description: "Merges the values of all inputs." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Merge + Merges the values of all inputs. diff --git a/docs/build/reference/transformer/Conditional/containsAllOf.md b/docs/build/reference/transformer/Conditional/containsAllOf.md index 041e3336a..e57f487d8 100644 --- a/docs/build/reference/transformer/Conditional/containsAllOf.md +++ b/docs/build/reference/transformer/Conditional/containsAllOf.md @@ -2,10 +2,12 @@ title: "Contains all of" description: "Accepts two inputs. If the first input contains all of the second input values it returns 'true', else 'false' is returned." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Contains all of + Accepts two inputs. 
If the first input contains all of the second input values it returns 'true', else 'false' is returned. diff --git a/docs/build/reference/transformer/Conditional/containsAnyOf.md b/docs/build/reference/transformer/Conditional/containsAnyOf.md index 57a04e1e7..f491134e0 100644 --- a/docs/build/reference/transformer/Conditional/containsAnyOf.md +++ b/docs/build/reference/transformer/Conditional/containsAnyOf.md @@ -2,10 +2,12 @@ title: "Contains any of" description: "Accepts two inputs. If the first input contains any of the second input values it returns 'true', else 'false' is returned." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Contains any of + Accepts two inputs. If the first input contains any of the second input values it returns 'true', else 'false' is returned. diff --git a/docs/build/reference/transformer/Conditional/ifContains.md b/docs/build/reference/transformer/Conditional/ifContains.md index 2049b2ded..a61cd6b97 100644 --- a/docs/build/reference/transformer/Conditional/ifContains.md +++ b/docs/build/reference/transformer/Conditional/ifContains.md @@ -2,10 +2,12 @@ title: "If contains" description: "Accepts two or three inputs. If the first input contains the given value, the second input is forwarded. Otherwise, the third input is forwarded (if present)." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # If contains + Accepts two or three inputs. If the first input contains the given value, the second input is forwarded. Otherwise, the third input is forwarded (if present). diff --git a/docs/build/reference/transformer/Conditional/ifExists.md b/docs/build/reference/transformer/Conditional/ifExists.md index 70f8f4b5c..cef473094 100644 --- a/docs/build/reference/transformer/Conditional/ifExists.md +++ b/docs/build/reference/transformer/Conditional/ifExists.md @@ -2,10 +2,12 @@ title: "If exists" description: "Accepts two or three inputs. 
If the first input provides a value, the second input is forwarded. Otherwise, the third input is forwarded (if present)." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # If exists + Accepts two or three inputs. If the first input provides a value, the second input is forwarded. Otherwise, the third input is forwarded (if present). diff --git a/docs/build/reference/transformer/Conditional/ifMatchesRegex.md b/docs/build/reference/transformer/Conditional/ifMatchesRegex.md index ffa63508f..75c750339 100644 --- a/docs/build/reference/transformer/Conditional/ifMatchesRegex.md +++ b/docs/build/reference/transformer/Conditional/ifMatchesRegex.md @@ -2,10 +2,12 @@ title: "If matches regex" description: "This transformer uses a regular expression as a matching condition, in order to distinguish which input to take." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # If matches regex + ## Description diff --git a/docs/build/reference/transformer/Conditional/negateTransformer.md b/docs/build/reference/transformer/Conditional/negateTransformer.md index e09a6f577..ef79a5033 100644 --- a/docs/build/reference/transformer/Conditional/negateTransformer.md +++ b/docs/build/reference/transformer/Conditional/negateTransformer.md @@ -2,10 +2,12 @@ title: "Negate binary (NOT)" description: "Accepts one input, which is either 'true', '1' or 'false', '0' and negates it." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Negate binary (NOT) + Accepts one input, which is either 'true', '1' or 'false', '0' and negates it. 
diff --git a/docs/build/reference/transformer/Conversion/convertCharset.md b/docs/build/reference/transformer/Conversion/convertCharset.md index 22bcf38d9..d50b14167 100644 --- a/docs/build/reference/transformer/Conversion/convertCharset.md +++ b/docs/build/reference/transformer/Conversion/convertCharset.md @@ -2,10 +2,12 @@ title: "Convert charset" description: "Convert the string from 'sourceCharset' to 'targetCharset'." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Convert charset + Convert the string from "sourceCharset" to "targetCharset". diff --git a/docs/build/reference/transformer/Date/compareDates.md b/docs/build/reference/transformer/Date/compareDates.md index 5726eb65e..22fc2747d 100644 --- a/docs/build/reference/transformer/Date/compareDates.md +++ b/docs/build/reference/transformer/Date/compareDates.md @@ -2,10 +2,12 @@ title: "Compare dates" description: "Compares two dates." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Compare dates + Compares two dates. diff --git a/docs/build/reference/transformer/Date/currentDate.md b/docs/build/reference/transformer/Date/currentDate.md index 30289a646..96e2c008d 100644 --- a/docs/build/reference/transformer/Date/currentDate.md +++ b/docs/build/reference/transformer/Date/currentDate.md @@ -2,10 +2,12 @@ title: "Current date" description: "Outputs the current date." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Current date + Outputs the current date. diff --git a/docs/build/reference/transformer/Date/datetoTimestamp.md b/docs/build/reference/transformer/Date/datetoTimestamp.md index a87b58050..868602665 100644 --- a/docs/build/reference/transformer/Date/datetoTimestamp.md +++ b/docs/build/reference/transformer/Date/datetoTimestamp.md @@ -2,10 +2,12 @@ title: "Date to timestamp" description: "Convert an xsd:dateTime to a timestamp. Returns the passed time since the Unix Epoch (1970-01-01)." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Date to timestamp + Convert an xsd:dateTime to a timestamp. Returns the passed time since the Unix Epoch (1970-01-01). diff --git a/docs/build/reference/transformer/Date/duration.md b/docs/build/reference/transformer/Date/duration.md index 73f9ac07a..2410f7fc5 100644 --- a/docs/build/reference/transformer/Date/duration.md +++ b/docs/build/reference/transformer/Date/duration.md @@ -2,10 +2,12 @@ title: "Duration" description: "Computes the time difference between two data times." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Duration + Computes the time difference between two data times. diff --git a/docs/build/reference/transformer/Date/durationInDays.md b/docs/build/reference/transformer/Date/durationInDays.md index 94f9f4b05..bdd6b125f 100644 --- a/docs/build/reference/transformer/Date/durationInDays.md +++ b/docs/build/reference/transformer/Date/durationInDays.md @@ -2,10 +2,12 @@ title: "Duration in days" description: "Converts an xsd:duration to days." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Duration in days + Converts an xsd:duration to days. diff --git a/docs/build/reference/transformer/Date/durationInSeconds.md b/docs/build/reference/transformer/Date/durationInSeconds.md index 083123fe7..feb044d55 100644 --- a/docs/build/reference/transformer/Date/durationInSeconds.md +++ b/docs/build/reference/transformer/Date/durationInSeconds.md @@ -2,10 +2,12 @@ title: "Duration in seconds" description: "Converts an xsd:duration to seconds." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Duration in seconds + Converts an xsd:duration to seconds. 
diff --git a/docs/build/reference/transformer/Date/durationInYears.md b/docs/build/reference/transformer/Date/durationInYears.md index a3f0e22cb..39b984b01 100644 --- a/docs/build/reference/transformer/Date/durationInYears.md +++ b/docs/build/reference/transformer/Date/durationInYears.md @@ -2,10 +2,12 @@ title: "Duration in years" description: "Converts an xsd:duration to years." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Duration in years + Converts an xsd:duration to years. diff --git a/docs/build/reference/transformer/Date/numberToDuration.md b/docs/build/reference/transformer/Date/numberToDuration.md index d1d4dffb8..d9e7688bd 100644 --- a/docs/build/reference/transformer/Date/numberToDuration.md +++ b/docs/build/reference/transformer/Date/numberToDuration.md @@ -2,10 +2,12 @@ title: "Number to duration" description: "Converts a number to an xsd:duration." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Number to duration + Converts a number to an xsd:duration. diff --git a/docs/build/reference/transformer/Date/parseDate.md b/docs/build/reference/transformer/Date/parseDate.md index f0b368ebd..c9574fe60 100644 --- a/docs/build/reference/transformer/Date/parseDate.md +++ b/docs/build/reference/transformer/Date/parseDate.md @@ -2,10 +2,12 @@ title: "Parse date pattern" description: "Parses a date based on a specified pattern, returning an xsd:date." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Parse date pattern + Parses a date based on a specified pattern, returning an xsd:date. diff --git a/docs/build/reference/transformer/Date/timeToDate.md b/docs/build/reference/transformer/Date/timeToDate.md index 6979b9736..b381b5190 100644 --- a/docs/build/reference/transformer/Date/timeToDate.md +++ b/docs/build/reference/transformer/Date/timeToDate.md @@ -2,10 +2,12 @@ title: "Timestamp to date" description: "Convert a timestamp to xsd:date format. 
Expects an integer that denotes the passed time since the Unix Epoch (1970-01-01)" icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Timestamp to date + Convert a timestamp to xsd:date format. Expects an integer that denotes the passed time since the Unix Epoch (1970-01-01) diff --git a/docs/build/reference/transformer/Excel/Excel_ABS.md b/docs/build/reference/transformer/Excel/Excel_ABS.md index 8d5f4866e..edbf4276a 100644 --- a/docs/build/reference/transformer/Excel/Excel_ABS.md +++ b/docs/build/reference/transformer/Excel/Excel_ABS.md @@ -2,10 +2,12 @@ title: "Abs" description: "Excel ABS(number): Returns the absolute value of the given number." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Abs + Excel ABS(number): Returns the absolute value of the given number. diff --git a/docs/build/reference/transformer/Excel/Excel_ACOS.md b/docs/build/reference/transformer/Excel/Excel_ACOS.md index e00b31dfb..191e9e251 100644 --- a/docs/build/reference/transformer/Excel/Excel_ACOS.md +++ b/docs/build/reference/transformer/Excel/Excel_ACOS.md @@ -2,10 +2,12 @@ title: "Acos" description: "Excel ACOS(number): Returns the inverse cosine of the given number in radians." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Acos + Excel ACOS(number): Returns the inverse cosine of the given number in radians. diff --git a/docs/build/reference/transformer/Excel/Excel_ACOSH.md b/docs/build/reference/transformer/Excel/Excel_ACOSH.md index 27baf6a7f..395be7256 100644 --- a/docs/build/reference/transformer/Excel/Excel_ACOSH.md +++ b/docs/build/reference/transformer/Excel/Excel_ACOSH.md @@ -2,10 +2,12 @@ title: "Acosh" description: "Excel ACOSH(number): Returns the inverse hyperbolic cosine of the given number in radians." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Acosh + Excel ACOSH(number): Returns the inverse hyperbolic cosine of the given number in radians. 
diff --git a/docs/build/reference/transformer/Excel/Excel_AND.md b/docs/build/reference/transformer/Excel/Excel_AND.md index fc4af8aae..436e82e15 100644 --- a/docs/build/reference/transformer/Excel/Excel_AND.md +++ b/docs/build/reference/transformer/Excel/Excel_AND.md @@ -2,10 +2,12 @@ title: "And" description: "Excel AND(argument1; argument2 ...argument30): Returns TRUE if all the arguments are considered TRUE, and FALSE otherwise." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # And + Excel AND(argument1; argument2 ...argument30): Returns TRUE if all the arguments are considered TRUE, and FALSE otherwise. diff --git a/docs/build/reference/transformer/Excel/Excel_ASIN.md b/docs/build/reference/transformer/Excel/Excel_ASIN.md index f5fedabb6..7e92cf4ae 100644 --- a/docs/build/reference/transformer/Excel/Excel_ASIN.md +++ b/docs/build/reference/transformer/Excel/Excel_ASIN.md @@ -2,10 +2,12 @@ title: "Asin" description: "Excel ASIN(number): Returns the inverse sine of the given number in radians." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Asin + Excel ASIN(number): Returns the inverse sine of the given number in radians. diff --git a/docs/build/reference/transformer/Excel/Excel_ASINH.md b/docs/build/reference/transformer/Excel/Excel_ASINH.md index c277d8098..886babb51 100644 --- a/docs/build/reference/transformer/Excel/Excel_ASINH.md +++ b/docs/build/reference/transformer/Excel/Excel_ASINH.md @@ -2,10 +2,12 @@ title: "Asinh" description: "Excel ASINH(number): Returns the inverse hyperbolic sine of the given number in radians." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Asinh + Excel ASINH(number): Returns the inverse hyperbolic sine of the given number in radians. 
diff --git a/docs/build/reference/transformer/Excel/Excel_ATAN.md b/docs/build/reference/transformer/Excel/Excel_ATAN.md index e9b6d267a..888b673b9 100644 --- a/docs/build/reference/transformer/Excel/Excel_ATAN.md +++ b/docs/build/reference/transformer/Excel/Excel_ATAN.md @@ -2,10 +2,12 @@ title: "Atan" description: "Excel ATAN(number): Returns the inverse tangent of the given number in radians." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Atan + Excel ATAN(number): Returns the inverse tangent of the given number in radians. diff --git a/docs/build/reference/transformer/Excel/Excel_ATAN2.md b/docs/build/reference/transformer/Excel/Excel_ATAN2.md index 8a2f99db9..2b8771d1f 100644 --- a/docs/build/reference/transformer/Excel/Excel_ATAN2.md +++ b/docs/build/reference/transformer/Excel/Excel_ATAN2.md @@ -2,10 +2,12 @@ title: "Atan2" description: "Excel ATAN2(number_x; number_y): Returns the inverse tangent of the specified x and y coordinates. Number_x is the value for the x coordinate. Number_y is the value for the y coordinate." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Atan2 + Excel ATAN2(number_x; number_y): Returns the inverse tangent of the specified x and y coordinates. Number_x is the value for the x coordinate. Number_y is the value for the y coordinate. diff --git a/docs/build/reference/transformer/Excel/Excel_ATANH.md b/docs/build/reference/transformer/Excel/Excel_ATANH.md index 96ec1d6de..0b4046ea2 100644 --- a/docs/build/reference/transformer/Excel/Excel_ATANH.md +++ b/docs/build/reference/transformer/Excel/Excel_ATANH.md @@ -2,10 +2,12 @@ title: "Atanh" description: "Excel ATANH(number): Returns the inverse hyperbolic tangent of the given number. (Angle is returned in radians.)" icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Atanh + Excel ATANH(number): Returns the inverse hyperbolic tangent of the given number. (Angle is returned in radians.) 
diff --git a/docs/build/reference/transformer/Excel/Excel_AVEDEV.md b/docs/build/reference/transformer/Excel/Excel_AVEDEV.md index 4c0a25b8a..b2cb49b85 100644 --- a/docs/build/reference/transformer/Excel/Excel_AVEDEV.md +++ b/docs/build/reference/transformer/Excel/Excel_AVEDEV.md @@ -2,10 +2,12 @@ title: "Avedev" description: "Excel AVEDEV(number1; number2; ... number_30): Returns the average of the absolute deviations of data points from their mean. Displays the diffusion in a data set. Number_1; number_2; ... number_30 are values or ranges that represent a sample. Each number can also be replaced by a reference." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Avedev + Excel AVEDEV(number1; number2; ... number_30): Returns the average of the absolute deviations of data points from their mean. Displays the diffusion in a data set. Number_1; number_2; ... number_30 are values or ranges that represent a sample. Each number can also be replaced by a reference. diff --git a/docs/build/reference/transformer/Excel/Excel_AVERAGE.md b/docs/build/reference/transformer/Excel/Excel_AVERAGE.md index f98b066d4..108d92cb4 100644 --- a/docs/build/reference/transformer/Excel/Excel_AVERAGE.md +++ b/docs/build/reference/transformer/Excel/Excel_AVERAGE.md @@ -2,10 +2,12 @@ title: "Average" description: "Excel AVERAGE(number_1; number_2; ... number_30): Returns the average of the arguments. Number_1; number_2; ... number_30 are numerical values or ranges. Text is ignored." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Average + Excel AVERAGE(number_1; number_2; ... number_30): Returns the average of the arguments. Number_1; number_2; ... number_30 are numerical values or ranges. Text is ignored. 
diff --git a/docs/build/reference/transformer/Excel/Excel_AVERAGEA.md b/docs/build/reference/transformer/Excel/Excel_AVERAGEA.md index 992a46a0d..a49d81d63 100644 --- a/docs/build/reference/transformer/Excel/Excel_AVERAGEA.md +++ b/docs/build/reference/transformer/Excel/Excel_AVERAGEA.md @@ -2,10 +2,12 @@ title: "Averagea" description: "Excel AVERAGEA(value_1; value_2; ... value_30): Returns the average of the arguments. The value of a text is 0. Value_1; value_2; ... value_30 are values or ranges." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Averagea + Excel AVERAGEA(value_1; value_2; ... value_30): Returns the average of the arguments. The value of a text is 0. Value_1; value_2; ... value_30 are values or ranges. diff --git a/docs/build/reference/transformer/Excel/Excel_CEILING.md b/docs/build/reference/transformer/Excel/Excel_CEILING.md index e41ed050d..1871de150 100644 --- a/docs/build/reference/transformer/Excel/Excel_CEILING.md +++ b/docs/build/reference/transformer/Excel/Excel_CEILING.md @@ -2,10 +2,12 @@ title: "Ceiling" description: "Excel CEILING(number; significance; mode): Rounds the given number to the nearest integer or multiple of significance. Significance is the value to whose multiple of ten the value is to be rounded up (.01, .1, 1, 10, etc.). Mode is an optional value. If it is indicated and non-zero and if the number and significance are negative, rounding up is carried out based on that value." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Ceiling + Excel CEILING(number; significance; mode): Rounds the given number to the nearest integer or multiple of significance. Significance is the value to whose multiple of ten the value is to be rounded up (.01, .1, 1, 10, etc.). Mode is an optional value. If it is indicated and non-zero and if the number and significance are negative, rounding up is carried out based on that value. 
diff --git a/docs/build/reference/transformer/Excel/Excel_CHOOSE.md b/docs/build/reference/transformer/Excel/Excel_CHOOSE.md index c81e88902..1ff3fee0e 100644 --- a/docs/build/reference/transformer/Excel/Excel_CHOOSE.md +++ b/docs/build/reference/transformer/Excel/Excel_CHOOSE.md @@ -2,10 +2,12 @@ title: "Choose" description: "Excel CHOOSE(index; value1; ... value30): Uses an index to return a value from a list of up to 30 values. Index is a reference or number between 1 and 30 indicating which value is to be taken from the list. Value1; ... value30 is the list of values entered as a reference to a cell or as individual values." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Choose + Excel CHOOSE(index; value1; ... value30): Uses an index to return a value from a list of up to 30 values. Index is a reference or number between 1 and 30 indicating which value is to be taken from the list. Value1; ... value30 is the list of values entered as a reference to a cell or as individual values. diff --git a/docs/build/reference/transformer/Excel/Excel_CLEAN.md b/docs/build/reference/transformer/Excel/Excel_CLEAN.md index bd20ef9ad..7ff09b85c 100644 --- a/docs/build/reference/transformer/Excel/Excel_CLEAN.md +++ b/docs/build/reference/transformer/Excel/Excel_CLEAN.md @@ -2,10 +2,12 @@ title: "Clean" description: "Excel CLEAN(text): Removes all non-printing characters from the string. Text refers to the text from which to remove all non-printable characters." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Clean + Excel CLEAN(text): Removes all non-printing characters from the string. Text refers to the text from which to remove all non-printable characters. 
diff --git a/docs/build/reference/transformer/Excel/Excel_CODE.md b/docs/build/reference/transformer/Excel/Excel_CODE.md index 8fed63f48..e082d16d3 100644 --- a/docs/build/reference/transformer/Excel/Excel_CODE.md +++ b/docs/build/reference/transformer/Excel/Excel_CODE.md @@ -2,10 +2,12 @@ title: "Code" description: "Excel CODE(text): Returns a numeric code for the first character in a text string. Text is the text for which the code of the first character is to be found." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Code + Excel CODE(text): Returns a numeric code for the first character in a text string. Text is the text for which the code of the first character is to be found. diff --git a/docs/build/reference/transformer/Excel/Excel_COMBIN.md b/docs/build/reference/transformer/Excel/Excel_COMBIN.md index b1f2bbd98..2944fd0ab 100644 --- a/docs/build/reference/transformer/Excel/Excel_COMBIN.md +++ b/docs/build/reference/transformer/Excel/Excel_COMBIN.md @@ -2,10 +2,12 @@ title: "Combin" description: "Excel COMBIN(count_1; count_2): Returns the number of combinations for a given number of objects. Count_1 is the total number of elements. Count_2 is the selected count from the elements. This is the same as the nCr function on a calculator." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Combin + Excel COMBIN(count_1; count_2): Returns the number of combinations for a given number of objects. Count_1 is the total number of elements. Count_2 is the selected count from the elements. This is the same as the nCr function on a calculator. 
diff --git a/docs/build/reference/transformer/Excel/Excel_CORREL.md b/docs/build/reference/transformer/Excel/Excel_CORREL.md index 41670c1cf..cb30f963b 100644 --- a/docs/build/reference/transformer/Excel/Excel_CORREL.md +++ b/docs/build/reference/transformer/Excel/Excel_CORREL.md @@ -2,10 +2,12 @@ title: "Correl" description: "Excel CORREL(data_1; data_2): Returns the correlation coefficient between two data sets. Data_1 is the first data set. Data_2 is the second data set." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Correl + Excel CORREL(data_1; data_2): Returns the correlation coefficient between two data sets. Data_1 is the first data set. Data_2 is the second data set. diff --git a/docs/build/reference/transformer/Excel/Excel_COS.md b/docs/build/reference/transformer/Excel/Excel_COS.md index 8869bbeab..a1f7f70fa 100644 --- a/docs/build/reference/transformer/Excel/Excel_COS.md +++ b/docs/build/reference/transformer/Excel/Excel_COS.md @@ -2,10 +2,12 @@ title: "Cos" description: "Excel COS(number): Returns the cosine of the given number (angle in radians)." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Cos + Excel COS(number): Returns the cosine of the given number (angle in radians). diff --git a/docs/build/reference/transformer/Excel/Excel_COSH.md b/docs/build/reference/transformer/Excel/Excel_COSH.md index 8799f957d..f3b5cdc73 100644 --- a/docs/build/reference/transformer/Excel/Excel_COSH.md +++ b/docs/build/reference/transformer/Excel/Excel_COSH.md @@ -2,10 +2,12 @@ title: "Cosh" description: "Excel COSH(number): Returns the hyperbolic cosine of the given number (angle in radians)." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Cosh + Excel COSH(number): Returns the hyperbolic cosine of the given number (angle in radians). 
diff --git a/docs/build/reference/transformer/Excel/Excel_COUNT.md b/docs/build/reference/transformer/Excel/Excel_COUNT.md index 93c16fd3f..1a24ea616 100644 --- a/docs/build/reference/transformer/Excel/Excel_COUNT.md +++ b/docs/build/reference/transformer/Excel/Excel_COUNT.md @@ -2,10 +2,12 @@ title: "Count" description: "Excel COUNT(value_1; value_2; ... value_30): Counts how many numbers are in the list of arguments. Text entries are ignored. Value_1; value_2; ... value_30 are values or ranges which are to be counted." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Count + Excel COUNT(value_1; value_2; ... value_30): Counts how many numbers are in the list of arguments. Text entries are ignored. Value_1; value_2; ... value_30 are values or ranges which are to be counted. diff --git a/docs/build/reference/transformer/Excel/Excel_COUNTA.md b/docs/build/reference/transformer/Excel/Excel_COUNTA.md index e6d4aef73..99b75fde6 100644 --- a/docs/build/reference/transformer/Excel/Excel_COUNTA.md +++ b/docs/build/reference/transformer/Excel/Excel_COUNTA.md @@ -2,10 +2,12 @@ title: "Counta" description: "Excel COUNTA(value_1; value_2; ... value_30): Counts how many values are in the list of arguments. Text entries are also counted, even when they contain an empty string of length 0. If an argument is an array or reference, empty cells within the array or reference are ignored. value_1; value_2; ... value_30 are up to 30 arguments representing the values to be counted." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Counta + Excel COUNTA(value_1; value_2; ... value_30): Counts how many values are in the list of arguments. Text entries are also counted, even when they contain an empty string of length 0. If an argument is an array or reference, empty cells within the array or reference are ignored. value_1; value_2; ... value_30 are up to 30 arguments representing the values to be counted. 
diff --git a/docs/build/reference/transformer/Excel/Excel_COVAR.md b/docs/build/reference/transformer/Excel/Excel_COVAR.md index a8a32ebe5..0936189ad 100644 --- a/docs/build/reference/transformer/Excel/Excel_COVAR.md +++ b/docs/build/reference/transformer/Excel/Excel_COVAR.md @@ -2,10 +2,12 @@ title: "Covar" description: "Excel COVAR(data_1; data_2): Returns the covariance of the product of paired deviations. Data_1 is the first data set. Data_2 is the second data set." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Covar + Excel COVAR(data_1; data_2): Returns the covariance of the product of paired deviations. Data_1 is the first data set. Data_2 is the second data set. diff --git a/docs/build/reference/transformer/Excel/Excel_DEGREES.md b/docs/build/reference/transformer/Excel/Excel_DEGREES.md index 0e08f6bbc..77959c5d9 100644 --- a/docs/build/reference/transformer/Excel/Excel_DEGREES.md +++ b/docs/build/reference/transformer/Excel/Excel_DEGREES.md @@ -2,10 +2,12 @@ title: "Degrees" description: "Excel DEGREES(number): Converts the given number in radians to degrees." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Degrees + Excel DEGREES(number): Converts the given number in radians to degrees. diff --git a/docs/build/reference/transformer/Excel/Excel_DEVSQ.md b/docs/build/reference/transformer/Excel/Excel_DEVSQ.md index a69a21bd3..6f92a284a 100644 --- a/docs/build/reference/transformer/Excel/Excel_DEVSQ.md +++ b/docs/build/reference/transformer/Excel/Excel_DEVSQ.md @@ -2,10 +2,12 @@ title: "Devsq" description: "Excel DEVSQ(number_1; number_2; ... number_30): Returns the sum of squares of deviations based on a sample mean. Number_1; number_2; ... number_30 are numerical values or ranges representing a sample." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Devsq + Excel DEVSQ(number_1; number_2; ... number_30): Returns the sum of squares of deviations based on a sample mean. 
Number_1; number_2; ... number_30 are numerical values or ranges representing a sample. diff --git a/docs/build/reference/transformer/Excel/Excel_EVEN.md b/docs/build/reference/transformer/Excel/Excel_EVEN.md index d1cc82fca..19ce73218 100644 --- a/docs/build/reference/transformer/Excel/Excel_EVEN.md +++ b/docs/build/reference/transformer/Excel/Excel_EVEN.md @@ -2,10 +2,12 @@ title: "Even" description: "Excel EVEN(number): Rounds the given number up to the nearest even integer." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Even + Excel EVEN(number): Rounds the given number up to the nearest even integer. diff --git a/docs/build/reference/transformer/Excel/Excel_EXACT.md b/docs/build/reference/transformer/Excel/Excel_EXACT.md index a848a1f52..10132bd89 100644 --- a/docs/build/reference/transformer/Excel/Excel_EXACT.md +++ b/docs/build/reference/transformer/Excel/Excel_EXACT.md @@ -2,10 +2,12 @@ title: "Exact" description: "Excel EXACT(text_1; text_2): Compares two text strings and returns TRUE if they are identical. This function is case- sensitive. Text_1 is the first text to compare. Text_2 is the second text to compare." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Exact + Excel EXACT(text_1; text_2): Compares two text strings and returns TRUE if they are identical. This function is case- sensitive. Text_1 is the first text to compare. Text_2 is the second text to compare. diff --git a/docs/build/reference/transformer/Excel/Excel_EXP.md b/docs/build/reference/transformer/Excel/Excel_EXP.md index 250a3920f..3f3b19e57 100644 --- a/docs/build/reference/transformer/Excel/Excel_EXP.md +++ b/docs/build/reference/transformer/Excel/Excel_EXP.md @@ -2,10 +2,12 @@ title: "Exp" description: "Excel EXP(number): Returns e raised to the power of the given number." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Exp + Excel EXP(number): Returns e raised to the power of the given number. 
diff --git a/docs/build/reference/transformer/Excel/Excel_FACT.md b/docs/build/reference/transformer/Excel/Excel_FACT.md index e996a1dc0..e59b10fc2 100644 --- a/docs/build/reference/transformer/Excel/Excel_FACT.md +++ b/docs/build/reference/transformer/Excel/Excel_FACT.md @@ -2,10 +2,12 @@ title: "Fact" description: "Excel FACT(number): Returns the factorial of the given number." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Fact + Excel FACT(number): Returns the factorial of the given number. diff --git a/docs/build/reference/transformer/Excel/Excel_FALSE.md b/docs/build/reference/transformer/Excel/Excel_FALSE.md index 94dc0e67e..84c9ac439 100644 --- a/docs/build/reference/transformer/Excel/Excel_FALSE.md +++ b/docs/build/reference/transformer/Excel/Excel_FALSE.md @@ -2,10 +2,12 @@ title: "False" description: "Excel FALSE(): Set the logical value to FALSE. The FALSE() function does not require any arguments." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # False + Excel FALSE(): Set the logical value to FALSE. The FALSE() function does not require any arguments. diff --git a/docs/build/reference/transformer/Excel/Excel_FIND.md b/docs/build/reference/transformer/Excel/Excel_FIND.md index f21cba0c2..d062fb124 100644 --- a/docs/build/reference/transformer/Excel/Excel_FIND.md +++ b/docs/build/reference/transformer/Excel/Excel_FIND.md @@ -2,10 +2,12 @@ title: "Find" description: "Excel FIND(find_text; text; position): Looks for a string of text within another string. Where to begin the search can also be defined. The search term can be a number or any string of characters. The search is case-sensitive. Find_text is the text to be found. Text is the text where the search takes place. Position (optional) is the position in the text from which the search starts." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Find + Excel FIND(find_text; text; position): Looks for a string of text within another string. Where to begin the search can also be defined. The search term can be a number or any string of characters. The search is case-sensitive. Find_text is the text to be found. Text is the text where the search takes place. Position (optional) is the position in the text from which the search starts. diff --git a/docs/build/reference/transformer/Excel/Excel_FLOOR.md b/docs/build/reference/transformer/Excel/Excel_FLOOR.md index cdd2f1714..9129725d2 100644 --- a/docs/build/reference/transformer/Excel/Excel_FLOOR.md +++ b/docs/build/reference/transformer/Excel/Excel_FLOOR.md @@ -2,10 +2,12 @@ title: "Floor" description: "Excel FLOOR(number; significance; mode): Rounds the given number down to the nearest multiple of significance. Significance is the value to whose multiple of ten the number is to be rounded down (.01, .1, 1, 10, etc.). Mode is an optional value. If it is indicated and non-zero and if the number and significance are negative, rounding up is carried out based on that value." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Floor + Excel FLOOR(number; significance; mode): Rounds the given number down to the nearest multiple of significance. Significance is the value to whose multiple of ten the number is to be rounded down (.01, .1, 1, 10, etc.). Mode is an optional value. If it is indicated and non-zero and if the number and significance are negative, rounding up is carried out based on that value. 
diff --git a/docs/build/reference/transformer/Excel/Excel_FORECAST.md b/docs/build/reference/transformer/Excel/Excel_FORECAST.md index afd080666..34d7bd0c0 100644 --- a/docs/build/reference/transformer/Excel/Excel_FORECAST.md +++ b/docs/build/reference/transformer/Excel/Excel_FORECAST.md @@ -2,10 +2,12 @@ title: "Forecast" description: "Excel FORECAST(value; data_Y; data_X): Extrapolates future values based on existing x and y values. Value is the x value, for which the y value of the linear regression is to be returned. Data_Y is the array or range of known y's. Data_X is the array or range of known x's. Does not work for exponential functions." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Forecast + Excel FORECAST(value; data_Y; data_X): Extrapolates future values based on existing x and y values. Value is the x value, for which the y value of the linear regression is to be returned. Data_Y is the array or range of known y's. Data_X is the array or range of known x's. Does not work for exponential functions. diff --git a/docs/build/reference/transformer/Excel/Excel_FV.md b/docs/build/reference/transformer/Excel/Excel_FV.md index 587b1d629..81efde87b 100644 --- a/docs/build/reference/transformer/Excel/Excel_FV.md +++ b/docs/build/reference/transformer/Excel/Excel_FV.md @@ -2,10 +2,12 @@ title: "Fv" description: "Excel FV(rate; NPER; PMT; PV; type): Returns the future value of an investment based on periodic, constant payments and a constant interest rate. Rate is the periodic interest rate. NPER is the total number of periods. PMT is the annuity paid regularly per period. PV (optional) is the present cash value of an investment. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Fv + Excel FV(rate; NPER; PMT; PV; type): Returns the future value of an investment based on periodic, constant payments and a constant interest rate. Rate is the periodic interest rate. NPER is the total number of periods. PMT is the annuity paid regularly per period. PV (optional) is the present cash value of an investment. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period. diff --git a/docs/build/reference/transformer/Excel/Excel_GEOMEAN.md b/docs/build/reference/transformer/Excel/Excel_GEOMEAN.md index d4f4d519c..bbe1d7415 100644 --- a/docs/build/reference/transformer/Excel/Excel_GEOMEAN.md +++ b/docs/build/reference/transformer/Excel/Excel_GEOMEAN.md @@ -2,10 +2,12 @@ title: "Geomean" description: "Excel GEOMEAN(number_1; number_2; ... number_30): Returns the geometric mean of a sample. Number_1; number_2; ... number_30 are numerical arguments or ranges that represent a random sample." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Geomean + Excel GEOMEAN(number_1; number_2; ... number_30): Returns the geometric mean of a sample. Number_1; number_2; ... number_30 are numerical arguments or ranges that represent a random sample. diff --git a/docs/build/reference/transformer/Excel/Excel_IF.md b/docs/build/reference/transformer/Excel/Excel_IF.md index c0bcafc53..fe30fa887 100644 --- a/docs/build/reference/transformer/Excel/Excel_IF.md +++ b/docs/build/reference/transformer/Excel/Excel_IF.md @@ -2,10 +2,12 @@ title: "If" description: "Excel IF(test; then_value; otherwise_value): Returns different values based on the test value. Note that in this implementation it will not actually evaluate logical conditions. Then_value is the value that is returned if the test is TRUE. Otherwise_value (optional) is the value that is returned if the test is FALSE." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # If + Excel IF(test; then_value; otherwise_value): Returns different values based on the test value. Note that in this implementation it will not actually evaluate logical conditions. Then_value is the value that is returned if the test is TRUE. Otherwise_value (optional) is the value that is returned if the test is FALSE. diff --git a/docs/build/reference/transformer/Excel/Excel_INT.md b/docs/build/reference/transformer/Excel/Excel_INT.md index edd3305bf..90a074ed1 100644 --- a/docs/build/reference/transformer/Excel/Excel_INT.md +++ b/docs/build/reference/transformer/Excel/Excel_INT.md @@ -2,10 +2,12 @@ title: "Int" description: "Excel INT(number): Rounds the given number down to the nearest integer." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Int + Excel INT(number): Rounds the given number down to the nearest integer. diff --git a/docs/build/reference/transformer/Excel/Excel_INTERCEPT.md b/docs/build/reference/transformer/Excel/Excel_INTERCEPT.md index fe393abc3..dbc1d3f0d 100644 --- a/docs/build/reference/transformer/Excel/Excel_INTERCEPT.md +++ b/docs/build/reference/transformer/Excel/Excel_INTERCEPT.md @@ -2,10 +2,12 @@ title: "Intercept" description: "Excel INTERCEPT(data_Y; data_X): Calculates the y-value at which a line will intersect the y-axis by using known x-values and y-values. Data_Y is the dependent set of observations or data. Data_X is the independent set of observations or data. Names, arrays or references containing numbers must be used here. Numbers can also be entered directly." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Intercept + Excel INTERCEPT(data_Y; data_X): Calculates the y-value at which a line will intersect the y-axis by using known x-values and y-values. Data_Y is the dependent set of observations or data. Data_X is the independent set of observations or data. 
Names, arrays or references containing numbers must be used here. Numbers can also be entered directly. diff --git a/docs/build/reference/transformer/Excel/Excel_IPMT.md b/docs/build/reference/transformer/Excel/Excel_IPMT.md index 3a8d4acb1..726aace8c 100644 --- a/docs/build/reference/transformer/Excel/Excel_IPMT.md +++ b/docs/build/reference/transformer/Excel/Excel_IPMT.md @@ -2,10 +2,12 @@ title: "Ipmt" description: "Excel IPMT(rate; period; NPER; PV; FV; type): Calculates the periodic amortization for an investment with regular payments and a constant interest rate. Rate is the periodic interest rate. Period is the period for which the compound interest is calculated. NPER is the total number of periods during which annuity is paid. Period=NPER, if compound interest for the last period is calculated. PV is the present cash value in sequence of payments. FV (optional) is the desired value (future value) at the end of the periods. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Ipmt + Excel IPMT(rate; period; NPER; PV; FV; type): Calculates the periodic amortization for an investment with regular payments and a constant interest rate. Rate is the periodic interest rate. Period is the period for which the compound interest is calculated. NPER is the total number of periods during which annuity is paid. Period=NPER, if compound interest for the last period is calculated. PV is the present cash value in sequence of payments. FV (optional) is the desired value (future value) at the end of the periods. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period. 
diff --git a/docs/build/reference/transformer/Excel/Excel_IRR.md b/docs/build/reference/transformer/Excel/Excel_IRR.md index 9110daefc..d6da4440b 100644 --- a/docs/build/reference/transformer/Excel/Excel_IRR.md +++ b/docs/build/reference/transformer/Excel/Excel_IRR.md @@ -2,10 +2,12 @@ title: "Irr" description: "Excel IRR(values; guess): Calculates the internal rate of return for an investment. The values represent cash flow values at regular intervals; at least one value must be negative (payments), and at least one value must be positive (income). Values is an array containing the values. Guess (optional) is the estimated value. If you can provide only a few values, you should provide an initial guess to enable the iteration." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Irr + Excel IRR(values; guess): Calculates the internal rate of return for an investment. The values represent cash flow values at regular intervals; at least one value must be negative (payments), and at least one value must be positive (income). Values is an array containing the values. Guess (optional) is the estimated value. If you can provide only a few values, you should provide an initial guess to enable the iteration. diff --git a/docs/build/reference/transformer/Excel/Excel_LARGE.md b/docs/build/reference/transformer/Excel/Excel_LARGE.md index 116689722..19373c7c3 100644 --- a/docs/build/reference/transformer/Excel/Excel_LARGE.md +++ b/docs/build/reference/transformer/Excel/Excel_LARGE.md @@ -2,10 +2,12 @@ title: "Large" description: "Excel LARGE(data; rank_c): Returns the Rank_c-th largest value in a data set. Data is the cell range of data. Rank_c is the ranking of the value (2nd largest, 3rd largest, etc.) written as an integer." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Large + Excel LARGE(data; rank_c): Returns the Rank_c-th largest value in a data set. Data is the cell range of data. 
Rank_c is the ranking of the value (2nd largest, 3rd largest, etc.) written as an integer. diff --git a/docs/build/reference/transformer/Excel/Excel_LEFT.md b/docs/build/reference/transformer/Excel/Excel_LEFT.md index c34e1341b..b98b73150 100644 --- a/docs/build/reference/transformer/Excel/Excel_LEFT.md +++ b/docs/build/reference/transformer/Excel/Excel_LEFT.md @@ -2,10 +2,12 @@ title: "Left" description: "Excel LEFT(text; number): Returns the first character or characters in a text string. Text is the text where the initial partial words are to be determined. Number (optional) is the number of characters for the start text. If this parameter is not defined, one character is returned." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Left + Excel LEFT(text; number): Returns the first character or characters in a text string. Text is the text where the initial partial words are to be determined. Number (optional) is the number of characters for the start text. If this parameter is not defined, one character is returned. diff --git a/docs/build/reference/transformer/Excel/Excel_LN.md b/docs/build/reference/transformer/Excel/Excel_LN.md index 08bb7f263..ca2c4598c 100644 --- a/docs/build/reference/transformer/Excel/Excel_LN.md +++ b/docs/build/reference/transformer/Excel/Excel_LN.md @@ -2,10 +2,12 @@ title: "Ln" description: "Excel LN(number): Returns the natural logarithm based on the constant e of the given number." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Ln + Excel LN(number): Returns the natural logarithm based on the constant e of the given number. 
diff --git a/docs/build/reference/transformer/Excel/Excel_LOG.md b/docs/build/reference/transformer/Excel/Excel_LOG.md index 41c7ffc83..23cc79dcb 100644 --- a/docs/build/reference/transformer/Excel/Excel_LOG.md +++ b/docs/build/reference/transformer/Excel/Excel_LOG.md @@ -2,10 +2,12 @@ title: "Log" description: "Excel LOG(number; base): Returns the logarithm of the given number to the specified base. Base is the base for the logarithm calculation." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Log + Excel LOG(number; base): Returns the logarithm of the given number to the specified base. Base is the base for the logarithm calculation. diff --git a/docs/build/reference/transformer/Excel/Excel_LOG10.md b/docs/build/reference/transformer/Excel/Excel_LOG10.md index b97a0b93c..5fca759e3 100644 --- a/docs/build/reference/transformer/Excel/Excel_LOG10.md +++ b/docs/build/reference/transformer/Excel/Excel_LOG10.md @@ -2,10 +2,12 @@ title: "Log10" description: "Excel LOG10(number): Returns the base-10 logarithm of the given number." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Log10 + Excel LOG10(number): Returns the base-10 logarithm of the given number. diff --git a/docs/build/reference/transformer/Excel/Excel_MAX.md b/docs/build/reference/transformer/Excel/Excel_MAX.md index 27b24d07b..c060db4f8 100644 --- a/docs/build/reference/transformer/Excel/Excel_MAX.md +++ b/docs/build/reference/transformer/Excel/Excel_MAX.md @@ -2,10 +2,12 @@ title: "Max" description: "Excel MAX(number_1; number_2; ... number_30): Returns the maximum value in a list of arguments. Number_1; number_2; ... number_30 are numerical values or ranges." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Max + Excel MAX(number_1; number_2; ... number_30): Returns the maximum value in a list of arguments. Number_1; number_2; ... number_30 are numerical values or ranges. 
diff --git a/docs/build/reference/transformer/Excel/Excel_MAXA.md b/docs/build/reference/transformer/Excel/Excel_MAXA.md index e60c43021..742fe0d8c 100644 --- a/docs/build/reference/transformer/Excel/Excel_MAXA.md +++ b/docs/build/reference/transformer/Excel/Excel_MAXA.md @@ -2,10 +2,12 @@ title: "Maxa" description: "Excel MAXA(value_1; value_2; ... value_30): Returns the maximum value in a list of arguments. Unlike MAX, text can be entered. The value of the text is 0. Value_1; value_2; ... value_30 are values or ranges." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Maxa + Excel MAXA(value_1; value_2; ... value_30): Returns the maximum value in a list of arguments. Unlike MAX, text can be entered. The value of the text is 0. Value_1; value_2; ... value_30 are values or ranges. diff --git a/docs/build/reference/transformer/Excel/Excel_MEDIAN.md b/docs/build/reference/transformer/Excel/Excel_MEDIAN.md index bc09ef899..405e33102 100644 --- a/docs/build/reference/transformer/Excel/Excel_MEDIAN.md +++ b/docs/build/reference/transformer/Excel/Excel_MEDIAN.md @@ -2,10 +2,12 @@ title: "Median" description: "Excel MEDIAN(number_1; number_2; ... number_30): Returns the median of a set of numbers. Number_1; number_2; ... number_30 are values or ranges, which represent a sample. Each number can also be replaced by a reference." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Median + Excel MEDIAN(number_1; number_2; ... number_30): Returns the median of a set of numbers. Number_1; number_2; ... number_30 are values or ranges, which represent a sample. Each number can also be replaced by a reference. 
diff --git a/docs/build/reference/transformer/Excel/Excel_MID.md b/docs/build/reference/transformer/Excel/Excel_MID.md index b08f492e2..e1bcd43e7 100644 --- a/docs/build/reference/transformer/Excel/Excel_MID.md +++ b/docs/build/reference/transformer/Excel/Excel_MID.md @@ -2,10 +2,12 @@ title: "Mid" description: "Excel MID(text; start; number): Returns a text segment of a character string. The parameters specify the starting position and the number of characters. Text is the text containing the characters to extract. Start is the position of the first character in the text to extract. Number is the number of characters in the part of the text." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Mid + Excel MID(text; start; number): Returns a text segment of a character string. The parameters specify the starting position and the number of characters. Text is the text containing the characters to extract. Start is the position of the first character in the text to extract. Number is the number of characters in the part of the text. diff --git a/docs/build/reference/transformer/Excel/Excel_MIN.md b/docs/build/reference/transformer/Excel/Excel_MIN.md index 1c0787d5e..f27bee047 100644 --- a/docs/build/reference/transformer/Excel/Excel_MIN.md +++ b/docs/build/reference/transformer/Excel/Excel_MIN.md @@ -2,10 +2,12 @@ title: "Min" description: "Excel MIN(number_1; number_2; ... number_30): Returns the minimum value in a list of arguments. Number_1; number_2; ... number_30 are numerical values or ranges." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Min + Excel MIN(number_1; number_2; ... number_30): Returns the minimum value in a list of arguments. Number_1; number_2; ... number_30 are numerical values or ranges. 
diff --git a/docs/build/reference/transformer/Excel/Excel_MINA.md b/docs/build/reference/transformer/Excel/Excel_MINA.md index 013dbb3df..d901213ab 100644 --- a/docs/build/reference/transformer/Excel/Excel_MINA.md +++ b/docs/build/reference/transformer/Excel/Excel_MINA.md @@ -2,10 +2,12 @@ title: "Mina" description: "Excel MINA(value_1; value_2; ... value_30): Returns the minimum value in a list of arguments. Here text can also be entered. The value of the text is 0. Value_1; value_2; ... value_30 are values or ranges." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Mina + Excel MINA(value_1; value_2; ... value_30): Returns the minimum value in a list of arguments. Here text can also be entered. The value of the text is 0. Value_1; value_2; ... value_30 are values or ranges. diff --git a/docs/build/reference/transformer/Excel/Excel_MIRR.md b/docs/build/reference/transformer/Excel/Excel_MIRR.md index 8f3079816..cacde6ef2 100644 --- a/docs/build/reference/transformer/Excel/Excel_MIRR.md +++ b/docs/build/reference/transformer/Excel/Excel_MIRR.md @@ -2,10 +2,12 @@ title: "Mirr" description: "Excel MIRR(values; investment; reinvest_rate): Calculates the modified internal rate of return of a series of investments. Values corresponds to the array or the cell reference for cells whose content corresponds to the payments. Investment is the rate of interest of the investments (the negative values of the array) Reinvest_rate is the rate of interest of the reinvestment (the positive values of the array)." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Mirr + Excel MIRR(values; investment; reinvest_rate): Calculates the modified internal rate of return of a series of investments. Values corresponds to the array or the cell reference for cells whose content corresponds to the payments. 
Investment is the rate of interest of the investments (the negative values of the array) Reinvest_rate is the rate of interest of the reinvestment (the positive values of the array). diff --git a/docs/build/reference/transformer/Excel/Excel_MOD.md b/docs/build/reference/transformer/Excel/Excel_MOD.md index d5cb2b096..70e50905b 100644 --- a/docs/build/reference/transformer/Excel/Excel_MOD.md +++ b/docs/build/reference/transformer/Excel/Excel_MOD.md @@ -2,10 +2,12 @@ title: "Mod" description: "Excel MOD(dividend; divisor): Returns the remainder after a number is divided by a divisor. Dividend is the number which will be divided by the divisor. Divisor is the number by which to divide the dividend." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Mod + Excel MOD(dividend; divisor): Returns the remainder after a number is divided by a divisor. Dividend is the number which will be divided by the divisor. Divisor is the number by which to divide the dividend. diff --git a/docs/build/reference/transformer/Excel/Excel_MODE.md b/docs/build/reference/transformer/Excel/Excel_MODE.md index 1f3a81826..2646fff8e 100644 --- a/docs/build/reference/transformer/Excel/Excel_MODE.md +++ b/docs/build/reference/transformer/Excel/Excel_MODE.md @@ -2,10 +2,12 @@ title: "Mode" description: "Excel MODE(number_1; number_2; ... number_30): Returns the most common value in a data set. Number_1; number_2; ... number_30 are numerical values or ranges. If several values have the same frequency, it returns the smallest value. An error occurs when a value does not appear twice." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Mode + Excel MODE(number_1; number_2; ... number_30): Returns the most common value in a data set. Number_1; number_2; ... number_30 are numerical values or ranges. If several values have the same frequency, it returns the smallest value. An error occurs when a value does not appear twice. 
diff --git a/docs/build/reference/transformer/Excel/Excel_NORMDIST.md b/docs/build/reference/transformer/Excel/Excel_NORMDIST.md index 92afdc17b..6f5e34b62 100644 --- a/docs/build/reference/transformer/Excel/Excel_NORMDIST.md +++ b/docs/build/reference/transformer/Excel/Excel_NORMDIST.md @@ -2,10 +2,12 @@ title: "Normdist" description: "Excel NORMDIST(number; mean; STDEV; C): Returns the normal distribution for the given Number in the distribution. Mean is the mean value of the distribution. STDEV is the standard deviation of the distribution. C = 0 calculates the density function, and C = 1 calculates the distribution." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Normdist + Excel NORMDIST(number; mean; STDEV; C): Returns the normal distribution for the given Number in the distribution. Mean is the mean value of the distribution. STDEV is the standard deviation of the distribution. C = 0 calculates the density function, and C = 1 calculates the distribution. diff --git a/docs/build/reference/transformer/Excel/Excel_NORMINV.md b/docs/build/reference/transformer/Excel/Excel_NORMINV.md index b101d7b1b..c551b74ee 100644 --- a/docs/build/reference/transformer/Excel/Excel_NORMINV.md +++ b/docs/build/reference/transformer/Excel/Excel_NORMINV.md @@ -2,10 +2,12 @@ title: "Norminv" description: "Excel NORMINV(number; mean; STDEV): Returns the inverse of the normal distribution for the given Number in the distribution. Mean is the mean value in the normal distribution. STDEV is the standard deviation of the normal distribution." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Norminv + Excel NORMINV(number; mean; STDEV): Returns the inverse of the normal distribution for the given Number in the distribution. Mean is the mean value in the normal distribution. STDEV is the standard deviation of the normal distribution. 
diff --git a/docs/build/reference/transformer/Excel/Excel_NORMSDIST.md b/docs/build/reference/transformer/Excel/Excel_NORMSDIST.md index b6144fd26..f2af4fad8 100644 --- a/docs/build/reference/transformer/Excel/Excel_NORMSDIST.md +++ b/docs/build/reference/transformer/Excel/Excel_NORMSDIST.md @@ -2,10 +2,12 @@ title: "Normsdist" description: "Excel NORMSDIST(number): Returns the standard normal cumulative distribution for the given Number." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Normsdist + Excel NORMSDIST(number): Returns the standard normal cumulative distribution for the given Number. diff --git a/docs/build/reference/transformer/Excel/Excel_NORMSINV.md b/docs/build/reference/transformer/Excel/Excel_NORMSINV.md index 918bd4908..653f428bb 100644 --- a/docs/build/reference/transformer/Excel/Excel_NORMSINV.md +++ b/docs/build/reference/transformer/Excel/Excel_NORMSINV.md @@ -2,10 +2,12 @@ title: "Normsinv" description: "Excel NORMSINV(number): Returns the inverse of the standard normal distribution for the given Number, a probability value." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Normsinv + Excel NORMSINV(number): Returns the inverse of the standard normal distribution for the given Number, a probability value. diff --git a/docs/build/reference/transformer/Excel/Excel_NOT.md b/docs/build/reference/transformer/Excel/Excel_NOT.md index 7cd71484b..9ec2a4365 100644 --- a/docs/build/reference/transformer/Excel/Excel_NOT.md +++ b/docs/build/reference/transformer/Excel/Excel_NOT.md @@ -2,10 +2,12 @@ title: "Not" description: "Excel NOT(logical_value): Reverses the logical value. Logical_value is any value to be reversed." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Not + Excel NOT(logical_value): Reverses the logical value. Logical_value is any value to be reversed. 
diff --git a/docs/build/reference/transformer/Excel/Excel_NPER.md b/docs/build/reference/transformer/Excel/Excel_NPER.md index e664f3c68..41f49d79e 100644 --- a/docs/build/reference/transformer/Excel/Excel_NPER.md +++ b/docs/build/reference/transformer/Excel/Excel_NPER.md @@ -2,10 +2,12 @@ title: "Nper" description: "Excel NPER(rate; PMT; PV; FV; type): Returns the number of periods for an investment based on periodic, constant payments and a constant interest rate. Rate is the periodic interest rate. PMT is the constant annuity paid in each period. PV is the present value (cash value) in a sequence of payments. FV (optional) is the future value, which is reached at the end of the last period. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Nper + Excel NPER(rate; PMT; PV; FV; type): Returns the number of periods for an investment based on periodic, constant payments and a constant interest rate. Rate is the periodic interest rate. PMT is the constant annuity paid in each period. PV is the present value (cash value) in a sequence of payments. FV (optional) is the future value, which is reached at the end of the last period. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period. diff --git a/docs/build/reference/transformer/Excel/Excel_NPV.md b/docs/build/reference/transformer/Excel/Excel_NPV.md index 8621f8e3f..37e8e3790 100644 --- a/docs/build/reference/transformer/Excel/Excel_NPV.md +++ b/docs/build/reference/transformer/Excel/Excel_NPV.md @@ -2,10 +2,12 @@ title: "Npv" description: "Excel NPV(Rate; value_1; value_2; ... value_30): Returns the net present value of an investment based on a series of periodic cash flows and a discount rate. Rate is the discount rate for a period. Value_1; value_2;... value_30 are values representing deposits or withdrawals." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Npv + Excel NPV(Rate; value_1; value_2; ... value_30): Returns the net present value of an investment based on a series of periodic cash flows and a discount rate. Rate is the discount rate for a period. Value_1; value_2;... value_30 are values representing deposits or withdrawals. diff --git a/docs/build/reference/transformer/Excel/Excel_ODD.md b/docs/build/reference/transformer/Excel/Excel_ODD.md index 676b155f2..839e526ae 100644 --- a/docs/build/reference/transformer/Excel/Excel_ODD.md +++ b/docs/build/reference/transformer/Excel/Excel_ODD.md @@ -2,10 +2,12 @@ title: "Odd" description: "Excel ODD(number): Rounds the given number up to the nearest odd integer." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Odd + Excel ODD(number): Rounds the given number up to the nearest odd integer. diff --git a/docs/build/reference/transformer/Excel/Excel_OR.md b/docs/build/reference/transformer/Excel/Excel_OR.md index a30102225..bfd8bd098 100644 --- a/docs/build/reference/transformer/Excel/Excel_OR.md +++ b/docs/build/reference/transformer/Excel/Excel_OR.md @@ -2,10 +2,12 @@ title: "Or" description: "Excel OR(logical_value_1; logical_value_2; ...logical_value_30): Returns TRUE if at least one argument is TRUE. Returns the value FALSE if all the arguments have the logical value FALSE. Logical_value_1; logical_value_2; ...logical_value_30 are conditions to be checked. All conditions can be either TRUE or FALSE. If a range is entered as a parameter, the function uses the value from the range that is in the current column or row." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Or + Excel OR(logical_value_1; logical_value_2; ...logical_value_30): Returns TRUE if at least one argument is TRUE. Returns the value FALSE if all the arguments have the logical value FALSE. Logical_value_1; logical_value_2; ...logical_value_30 are conditions to be checked. 
All conditions can be either TRUE or FALSE. If a range is entered as a parameter, the function uses the value from the range that is in the current column or row. diff --git a/docs/build/reference/transformer/Excel/Excel_PEARSON.md b/docs/build/reference/transformer/Excel/Excel_PEARSON.md index a052bbf74..45a777514 100644 --- a/docs/build/reference/transformer/Excel/Excel_PEARSON.md +++ b/docs/build/reference/transformer/Excel/Excel_PEARSON.md @@ -2,10 +2,12 @@ title: "Pearson" description: "Excel PEARSON(data_1; data_2): Returns the Pearson product moment correlation coefficient r. Data_1 is the array of the first data set. Data_2 is the array of the second data set." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Pearson + Excel PEARSON(data_1; data_2): Returns the Pearson product moment correlation coefficient r. Data_1 is the array of the first data set. Data_2 is the array of the second data set. diff --git a/docs/build/reference/transformer/Excel/Excel_PERCENTILE.md b/docs/build/reference/transformer/Excel/Excel_PERCENTILE.md index e1b8357b6..bbd1fd969 100644 --- a/docs/build/reference/transformer/Excel/Excel_PERCENTILE.md +++ b/docs/build/reference/transformer/Excel/Excel_PERCENTILE.md @@ -2,10 +2,12 @@ title: "Percentile" description: "Excel PERCENTILE(data; alpha): Returns the alpha-percentile of data values in an array. Data is the array of data. Alpha is the percentage of the scale between 0 and 1." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Percentile + Excel PERCENTILE(data; alpha): Returns the alpha-percentile of data values in an array. Data is the array of data. Alpha is the percentage of the scale between 0 and 1. 
diff --git a/docs/build/reference/transformer/Excel/Excel_PERCENTRANK.md b/docs/build/reference/transformer/Excel/Excel_PERCENTRANK.md index 813d91465..eca50b70c 100644 --- a/docs/build/reference/transformer/Excel/Excel_PERCENTRANK.md +++ b/docs/build/reference/transformer/Excel/Excel_PERCENTRANK.md @@ -2,10 +2,12 @@ title: "Percentrank" description: "Excel PERCENTRANK(data; value): Returns the percentage rank (percentile) of the given value in a sample. Data is the array of data in the sample." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Percentrank + Excel PERCENTRANK(data; value): Returns the percentage rank (percentile) of the given value in a sample. Data is the array of data in the sample. diff --git a/docs/build/reference/transformer/Excel/Excel_PI.md b/docs/build/reference/transformer/Excel/Excel_PI.md index fbfb513d1..6f4e436bd 100644 --- a/docs/build/reference/transformer/Excel/Excel_PI.md +++ b/docs/build/reference/transformer/Excel/Excel_PI.md @@ -2,10 +2,12 @@ title: "Pi" description: "Excel PI(): Returns the value of PI to fourteen decimal places." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Pi + Excel PI(): Returns the value of PI to fourteen decimal places. diff --git a/docs/build/reference/transformer/Excel/Excel_PMT.md b/docs/build/reference/transformer/Excel/Excel_PMT.md index 87bac964f..492b6cfd7 100644 --- a/docs/build/reference/transformer/Excel/Excel_PMT.md +++ b/docs/build/reference/transformer/Excel/Excel_PMT.md @@ -2,10 +2,12 @@ title: "Pmt" description: "Excel PMT(rate; NPER; PV; FV; type): Returns the periodic payment for an annuity with constant interest rates. Rate is the periodic interest rate. NPER is the number of periods in which annuity is paid. PV is the present value (cash value) in a sequence of payments. FV (optional) is the desired value (future value) to be reached at the end of the periodic payments. 
Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Pmt + Excel PMT(rate; NPER; PV; FV; type): Returns the periodic payment for an annuity with constant interest rates. Rate is the periodic interest rate. NPER is the number of periods in which annuity is paid. PV is the present value (cash value) in a sequence of payments. FV (optional) is the desired value (future value) to be reached at the end of the periodic payments. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period. diff --git a/docs/build/reference/transformer/Excel/Excel_POISSON.md b/docs/build/reference/transformer/Excel/Excel_POISSON.md index 5f86ae3d3..f8dbe16af 100644 --- a/docs/build/reference/transformer/Excel/Excel_POISSON.md +++ b/docs/build/reference/transformer/Excel/Excel_POISSON.md @@ -2,10 +2,12 @@ title: "Poisson" description: "Excel POISSON(number; mean; C): Returns the Poisson distribution for the given Number. Mean is the middle value of the Poisson distribution. C = 0 calculates the density function, and C = 1 calculates the distribution." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Poisson + Excel POISSON(number; mean; C): Returns the Poisson distribution for the given Number. Mean is the middle value of the Poisson distribution. C = 0 calculates the density function, and C = 1 calculates the distribution. diff --git a/docs/build/reference/transformer/Excel/Excel_POWER.md b/docs/build/reference/transformer/Excel/Excel_POWER.md index 8ef7c6427..2fe3d2b46 100644 --- a/docs/build/reference/transformer/Excel/Excel_POWER.md +++ b/docs/build/reference/transformer/Excel/Excel_POWER.md @@ -2,10 +2,12 @@ title: "Power" description: "Excel POWER(base; power): Returns the result of a number raised to a power. Base is the number that is to be raised to the given power. 
Power is the exponent by which the base is to be raised." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Power + Excel POWER(base; power): Returns the result of a number raised to a power. Base is the number that is to be raised to the given power. Power is the exponent by which the base is to be raised. diff --git a/docs/build/reference/transformer/Excel/Excel_PPMT.md b/docs/build/reference/transformer/Excel/Excel_PPMT.md index c701740ea..21f127603 100644 --- a/docs/build/reference/transformer/Excel/Excel_PPMT.md +++ b/docs/build/reference/transformer/Excel/Excel_PPMT.md @@ -2,10 +2,12 @@ title: "Ppmt" description: "Excel PPMT(rate; period; NPER; PV; FV; type): Returns for a given period the payment on the principal for an investment that is based on periodic and constant payments and a constant interest rate. Rate is the periodic interest rate. Period is the amortization period. NPER is the total number of periods during which annuity is paid. PV is the present value in the sequence of payments. FV (optional) is the desired (future) value. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Ppmt + Excel PPMT(rate; period; NPER; PV; FV; type): Returns for a given period the payment on the principal for an investment that is based on periodic and constant payments and a constant interest rate. Rate is the periodic interest rate. Period is the amortization period. NPER is the total number of periods during which annuity is paid. PV is the present value in the sequence of payments. FV (optional) is the desired (future) value. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period. 
diff --git a/docs/build/reference/transformer/Excel/Excel_PRODUCT.md b/docs/build/reference/transformer/Excel/Excel_PRODUCT.md index d436a504c..1a59a8274 100644 --- a/docs/build/reference/transformer/Excel/Excel_PRODUCT.md +++ b/docs/build/reference/transformer/Excel/Excel_PRODUCT.md @@ -2,10 +2,12 @@ title: "Product" description: "Excel PRODUCT(number 1 to 30): Multiplies all the numbers given as arguments and returns the product. Number 1 to number 30 are up to 30 arguments whose product is to be calculated, separated by semi-colons." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Product + Excel PRODUCT(number 1 to 30): Multiplies all the numbers given as arguments and returns the product. Number 1 to number 30 are up to 30 arguments whose product is to be calculated, separated by semi-colons. diff --git a/docs/build/reference/transformer/Excel/Excel_PROPER.md b/docs/build/reference/transformer/Excel/Excel_PROPER.md index 97116e85a..d2c75bc54 100644 --- a/docs/build/reference/transformer/Excel/Excel_PROPER.md +++ b/docs/build/reference/transformer/Excel/Excel_PROPER.md @@ -2,10 +2,12 @@ title: "Proper" description: "Excel PROPER(text): Capitalizes the first letter in all words of a text string. Text is the text to be converted." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Proper + Excel PROPER(text): Capitalizes the first letter in all words of a text string. Text is the text to be converted. diff --git a/docs/build/reference/transformer/Excel/Excel_PV.md b/docs/build/reference/transformer/Excel/Excel_PV.md index fb6897c56..43d3a978e 100644 --- a/docs/build/reference/transformer/Excel/Excel_PV.md +++ b/docs/build/reference/transformer/Excel/Excel_PV.md @@ -2,10 +2,12 @@ title: "Pv" description: "Excel PV(rate; NPER; PMT; FV; type): Returns the present value of an investment resulting from a series of regular payments. Rate defines the interest rate per period. 
NPER is the total number of payment periods. PMT is the regular payment made per period. FV (optional) defines the future value remaining after the final installment has been made. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Pv + Excel PV(rate; NPER; PMT; FV; type): Returns the present value of an investment resulting from a series of regular payments. Rate defines the interest rate per period. NPER is the total number of payment periods. PMT is the regular payment made per period. FV (optional) defines the future value remaining after the final installment has been made. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period. diff --git a/docs/build/reference/transformer/Excel/Excel_RADIANS.md b/docs/build/reference/transformer/Excel/Excel_RADIANS.md index 7b7027b1c..0a771e176 100644 --- a/docs/build/reference/transformer/Excel/Excel_RADIANS.md +++ b/docs/build/reference/transformer/Excel/Excel_RADIANS.md @@ -2,10 +2,12 @@ title: "Radians" description: "Excel RADIANS(number): Converts the given number in degrees to radians." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Radians + Excel RADIANS(number): Converts the given number in degrees to radians. diff --git a/docs/build/reference/transformer/Excel/Excel_RAND.md b/docs/build/reference/transformer/Excel/Excel_RAND.md index e43196452..f7826cbae 100644 --- a/docs/build/reference/transformer/Excel/Excel_RAND.md +++ b/docs/build/reference/transformer/Excel/Excel_RAND.md @@ -2,10 +2,12 @@ title: "Rand" description: "Excel RAND(): Returns a random number between 0 and 1." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Rand + Excel RAND(): Returns a random number between 0 and 1. 
diff --git a/docs/build/reference/transformer/Excel/Excel_RANK.md b/docs/build/reference/transformer/Excel/Excel_RANK.md index 71e227cba..5ada033d7 100644 --- a/docs/build/reference/transformer/Excel/Excel_RANK.md +++ b/docs/build/reference/transformer/Excel/Excel_RANK.md @@ -2,10 +2,12 @@ title: "Rank" description: "Excel RANK(value; data; type): Returns the rank of the given Value in a sample. Data is the array or range of data in the sample. Type (optional) is the sequence order, either ascending (0) or descending (1)." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Rank + Excel RANK(value; data; type): Returns the rank of the given Value in a sample. Data is the array or range of data in the sample. Type (optional) is the sequence order, either ascending (0) or descending (1). diff --git a/docs/build/reference/transformer/Excel/Excel_RATE.md b/docs/build/reference/transformer/Excel/Excel_RATE.md index 895a2087d..1fa3bd846 100644 --- a/docs/build/reference/transformer/Excel/Excel_RATE.md +++ b/docs/build/reference/transformer/Excel/Excel_RATE.md @@ -2,10 +2,12 @@ title: "Rate" description: "Excel RATE(NPER; PMT; PV; FV; type; guess): Returns the constant interest rate per period of an annuity. NPER is the total number of periods, during which payments are made (payment period). PMT is the constant payment (annuity) paid during each period. PV is the cash value in the sequence of payments. FV (optional) is the future value, which is reached at the end of the periodic payments. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period. Guess (optional) determines the estimated value of the interest with iterative calculation." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Rate + Excel RATE(NPER; PMT; PV; FV; type; guess): Returns the constant interest rate per period of an annuity. NPER is the total number of periods, during which payments are made (payment period). 
PMT is the constant payment (annuity) paid during each period. PV is the cash value in the sequence of payments. FV (optional) is the future value, which is reached at the end of the periodic payments. Type (optional) defines whether the payment is due at the beginning (1) or the end (0) of a period. Guess (optional) determines the estimated value of the interest with iterative calculation. diff --git a/docs/build/reference/transformer/Excel/Excel_REPLACE.md b/docs/build/reference/transformer/Excel/Excel_REPLACE.md index 14a76dcb2..47e002f79 100644 --- a/docs/build/reference/transformer/Excel/Excel_REPLACE.md +++ b/docs/build/reference/transformer/Excel/Excel_REPLACE.md @@ -2,10 +2,12 @@ title: "Replace" description: "Excel REPLACE(text; position; length; new_text): Replaces part of a text string with a different text string. This function can be used to replace both characters and numbers (which are automatically converted to text). The result of the function is always displayed as text. To perform further calculations with a number which has been replaced by text, convert it back to a number using the VALUE function. Any text containing numbers must be enclosed in quotation marks so it is not interpreted as a number and automatically converted to text. Text is text of which a part will be replaced. Position is the position within the text where the replacement will begin. Length is the number of characters in text to be replaced. New_text is the text which replaces text.." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Replace + Excel REPLACE(text; position; length; new_text): Replaces part of a text string with a different text string. This function can be used to replace both characters and numbers (which are automatically converted to text). The result of the function is always displayed as text. To perform further calculations with a number which has been replaced by text, convert it back to a number using the VALUE function. 
Any text containing numbers must be enclosed in quotation marks so it is not interpreted as a number and automatically converted to text. Text is text of which a part will be replaced. Position is the position within the text where the replacement will begin. Length is the number of characters in text to be replaced. New_text is the text which replaces text.. diff --git a/docs/build/reference/transformer/Excel/Excel_REPT.md b/docs/build/reference/transformer/Excel/Excel_REPT.md index 196c35efe..60c2f876e 100644 --- a/docs/build/reference/transformer/Excel/Excel_REPT.md +++ b/docs/build/reference/transformer/Excel/Excel_REPT.md @@ -2,10 +2,12 @@ title: "Rept" description: "Excel REPT(text; number): Repeats a character string by the given number of copies. Text is the text to be repeated. Number is the number of repetitions. The result can be a maximum of 255 characters." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Rept + Excel REPT(text; number): Repeats a character string by the given number of copies. Text is the text to be repeated. Number is the number of repetitions. The result can be a maximum of 255 characters. diff --git a/docs/build/reference/transformer/Excel/Excel_RIGHT.md b/docs/build/reference/transformer/Excel/Excel_RIGHT.md index 7a0692bd6..4b627d4dd 100644 --- a/docs/build/reference/transformer/Excel/Excel_RIGHT.md +++ b/docs/build/reference/transformer/Excel/Excel_RIGHT.md @@ -2,10 +2,12 @@ title: "Right" description: "Excel RIGHT(text; number): Defines the last character or characters in a text string. Text is the text of which the right part is to be determined. Number (optional) is the number of characters from the right part of the text." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Right + Excel RIGHT(text; number): Defines the last character or characters in a text string. Text is the text of which the right part is to be determined. 
Number (optional) is the number of characters from the right part of the text. diff --git a/docs/build/reference/transformer/Excel/Excel_ROMAN.md b/docs/build/reference/transformer/Excel/Excel_ROMAN.md index 69bf9870b..f2ddb0a5f 100644 --- a/docs/build/reference/transformer/Excel/Excel_ROMAN.md +++ b/docs/build/reference/transformer/Excel/Excel_ROMAN.md @@ -2,10 +2,12 @@ title: "Roman" description: "Excel ROMAN(number; mode): Converts a number into a Roman numeral. The value range must be between 0 and 3999; the modes can be integers from 0 to 4. Number is the number that is to be converted into a Roman numeral. Mode (optional) indicates the degree of simplification. The higher the value, the greater is the simplification of the Roman numeral." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Roman + Excel ROMAN(number; mode): Converts a number into a Roman numeral. The value range must be between 0 and 3999; the modes can be integers from 0 to 4. Number is the number that is to be converted into a Roman numeral. Mode (optional) indicates the degree of simplification. The higher the value, the greater is the simplification of the Roman numeral. diff --git a/docs/build/reference/transformer/Excel/Excel_ROUND.md b/docs/build/reference/transformer/Excel/Excel_ROUND.md index 5a6805c0d..df8bf4848 100644 --- a/docs/build/reference/transformer/Excel/Excel_ROUND.md +++ b/docs/build/reference/transformer/Excel/Excel_ROUND.md @@ -2,10 +2,12 @@ title: "Round" description: "Excel ROUND(number; count): Rounds the given number to a certain number of decimal places according to valid mathematical criteria. Count (optional) is the number of the places to which the value is to be rounded. If the count parameter is negative, only the whole number portion is rounded. It is rounded to the place indicated by the count." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Round + Excel ROUND(number; count): Rounds the given number to a certain number of decimal places according to valid mathematical criteria. Count (optional) is the number of the places to which the value is to be rounded. If the count parameter is negative, only the whole number portion is rounded. It is rounded to the place indicated by the count. diff --git a/docs/build/reference/transformer/Excel/Excel_ROUNDDOWN.md b/docs/build/reference/transformer/Excel/Excel_ROUNDDOWN.md index 6eadbafde..9c6451970 100644 --- a/docs/build/reference/transformer/Excel/Excel_ROUNDDOWN.md +++ b/docs/build/reference/transformer/Excel/Excel_ROUNDDOWN.md @@ -2,10 +2,12 @@ title: "Rounddown" description: "Excel ROUNDDOWN(number; count): Rounds the given number. Count (optional) is the number of digits to be rounded down to. If the count parameter is negative, only the whole number portion is rounded. It is rounded to the place indicated by the count." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Rounddown + Excel ROUNDDOWN(number; count): Rounds the given number. Count (optional) is the number of digits to be rounded down to. If the count parameter is negative, only the whole number portion is rounded. It is rounded to the place indicated by the count. diff --git a/docs/build/reference/transformer/Excel/Excel_ROUNDUP.md b/docs/build/reference/transformer/Excel/Excel_ROUNDUP.md index 64fadeb7e..3dbdda11a 100644 --- a/docs/build/reference/transformer/Excel/Excel_ROUNDUP.md +++ b/docs/build/reference/transformer/Excel/Excel_ROUNDUP.md @@ -2,10 +2,12 @@ title: "Roundup" description: "Excel ROUNDUP(number; count): Rounds the given number up. Count (optional) is the number of digits to which rounding up is to be done. If the count parameter is negative, only the whole number portion is rounded. It is rounded to the place indicated by the count." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Roundup + Excel ROUNDUP(number; count): Rounds the given number up. Count (optional) is the number of digits to which rounding up is to be done. If the count parameter is negative, only the whole number portion is rounded. It is rounded to the place indicated by the count. diff --git a/docs/build/reference/transformer/Excel/Excel_SEARCH.md b/docs/build/reference/transformer/Excel/Excel_SEARCH.md index 0af1917fe..3944bc1ec 100644 --- a/docs/build/reference/transformer/Excel/Excel_SEARCH.md +++ b/docs/build/reference/transformer/Excel/Excel_SEARCH.md @@ -2,10 +2,12 @@ title: "Search" description: "Excel SEARCH(find_text; text; position): Returns the position of a text segment within a character string. The start of the search can be set as an option. The search text can be a number or any sequence of characters. The search is not case-sensitive. The search supports regular expressions. Find_text is the text to be searched for. Text is the text where the search will take place. Position (optional) is the position in the text where the search is to start." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Search + Excel SEARCH(find_text; text; position): Returns the position of a text segment within a character string. The start of the search can be set as an option. The search text can be a number or any sequence of characters. The search is not case-sensitive. The search supports regular expressions. Find_text is the text to be searched for. Text is the text where the search will take place. Position (optional) is the position in the text where the search is to start. 
diff --git a/docs/build/reference/transformer/Excel/Excel_SIGN.md b/docs/build/reference/transformer/Excel/Excel_SIGN.md index 2f384e149..609be2290 100644 --- a/docs/build/reference/transformer/Excel/Excel_SIGN.md +++ b/docs/build/reference/transformer/Excel/Excel_SIGN.md @@ -2,10 +2,12 @@ title: "Sign" description: "Excel SIGN(number): Returns the sign of the given number. The function returns the result 1 for a positive sign, -1 for a negative sign, and 0 for zero." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Sign + Excel SIGN(number): Returns the sign of the given number. The function returns the result 1 for a positive sign, -1 for a negative sign, and 0 for zero. diff --git a/docs/build/reference/transformer/Excel/Excel_SIN.md b/docs/build/reference/transformer/Excel/Excel_SIN.md index 3084ebc1e..80cbc9962 100644 --- a/docs/build/reference/transformer/Excel/Excel_SIN.md +++ b/docs/build/reference/transformer/Excel/Excel_SIN.md @@ -2,10 +2,12 @@ title: "Sin" description: "Excel SIN(number): Returns the sine of the given number (angle in radians)." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Sin + Excel SIN(number): Returns the sine of the given number (angle in radians). diff --git a/docs/build/reference/transformer/Excel/Excel_SINH.md b/docs/build/reference/transformer/Excel/Excel_SINH.md index a9cc4c5d8..c4f78593c 100644 --- a/docs/build/reference/transformer/Excel/Excel_SINH.md +++ b/docs/build/reference/transformer/Excel/Excel_SINH.md @@ -2,10 +2,12 @@ title: "Sinh" description: "Excel SINH(number): Returns the hyperbolic sine of the given number (angle in radians)." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Sinh + Excel SINH(number): Returns the hyperbolic sine of the given number (angle in radians). 
diff --git a/docs/build/reference/transformer/Excel/Excel_SLOPE.md b/docs/build/reference/transformer/Excel/Excel_SLOPE.md index eeba32125..a7702fea1 100644 --- a/docs/build/reference/transformer/Excel/Excel_SLOPE.md +++ b/docs/build/reference/transformer/Excel/Excel_SLOPE.md @@ -2,10 +2,12 @@ title: "Slope" description: "Excel SLOPE(data_Y; data_X): Returns the slope of the linear regression line. Data_Y is the array or matrix of Y data. Data_X is the array or matrix of X data." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Slope + Excel SLOPE(data_Y; data_X): Returns the slope of the linear regression line. Data_Y is the array or matrix of Y data. Data_X is the array or matrix of X data. diff --git a/docs/build/reference/transformer/Excel/Excel_SMALL.md b/docs/build/reference/transformer/Excel/Excel_SMALL.md index 1fb0ef13f..6d74643e9 100644 --- a/docs/build/reference/transformer/Excel/Excel_SMALL.md +++ b/docs/build/reference/transformer/Excel/Excel_SMALL.md @@ -2,10 +2,12 @@ title: "Small" description: "Excel SMALL(data; rank_c): Returns the Rank_c-th smallest value in a data set. Data is the cell range of data. Rank_c is the rank of the value (2nd smallest, 3rd smallest, etc.) written as an integer." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Small + Excel SMALL(data; rank_c): Returns the Rank_c-th smallest value in a data set. Data is the cell range of data. Rank_c is the rank of the value (2nd smallest, 3rd smallest, etc.) written as an integer. diff --git a/docs/build/reference/transformer/Excel/Excel_SQRT.md b/docs/build/reference/transformer/Excel/Excel_SQRT.md index 0e2035978..13235873f 100644 --- a/docs/build/reference/transformer/Excel/Excel_SQRT.md +++ b/docs/build/reference/transformer/Excel/Excel_SQRT.md @@ -2,10 +2,12 @@ title: "Sqrt" description: "Excel SQRT(number): Returns the positive square root of the given number. The value of the number must be positive." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Sqrt + Excel SQRT(number): Returns the positive square root of the given number. The value of the number must be positive. diff --git a/docs/build/reference/transformer/Excel/Excel_STANDARDIZE.md b/docs/build/reference/transformer/Excel/Excel_STANDARDIZE.md index 3bbc29030..b46df2a2f 100644 --- a/docs/build/reference/transformer/Excel/Excel_STANDARDIZE.md +++ b/docs/build/reference/transformer/Excel/Excel_STANDARDIZE.md @@ -2,10 +2,12 @@ title: "Standardize" description: "Excel STANDARDIZE(number; mean; STDEV): Converts a random variable to a normalized value. Number is the value to be standardized. Mean is the arithmetic mean of the distribution. STDEV is the standard deviation of the distribution." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Standardize + Excel STANDARDIZE(number; mean; STDEV): Converts a random variable to a normalized value. Number is the value to be standardized. Mean is the arithmetic mean of the distribution. STDEV is the standard deviation of the distribution. diff --git a/docs/build/reference/transformer/Excel/Excel_STDEV.md b/docs/build/reference/transformer/Excel/Excel_STDEV.md index 3863dacc5..982ff021c 100644 --- a/docs/build/reference/transformer/Excel/Excel_STDEV.md +++ b/docs/build/reference/transformer/Excel/Excel_STDEV.md @@ -2,10 +2,12 @@ title: "Stdev" description: "Excel STDEV(number_1; number_2; ... number_30): Estimates the standard deviation based on a sample. Number_1; number_2; ... number_30 are numerical values or ranges representing a sample based on an entire population." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Stdev + Excel STDEV(number_1; number_2; ... number_30): Estimates the standard deviation based on a sample. Number_1; number_2; ... number_30 are numerical values or ranges representing a sample based on an entire population. 
diff --git a/docs/build/reference/transformer/Excel/Excel_STDEVA.md b/docs/build/reference/transformer/Excel/Excel_STDEVA.md index 4503eb463..2ad74edbd 100644 --- a/docs/build/reference/transformer/Excel/Excel_STDEVA.md +++ b/docs/build/reference/transformer/Excel/Excel_STDEVA.md @@ -2,10 +2,12 @@ title: "Stdeva" description: "Excel STDEVA(value_1; value_2; ... value_30): Calculates the standard deviation of an estimation based on a sample. Value_1; value_2; ... value_30 are values or ranges representing a sample derived from an entire population. Text has the value 0." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Stdeva + Excel STDEVA(value_1; value_2; ... value_30): Calculates the standard deviation of an estimation based on a sample. Value_1; value_2; ... value_30 are values or ranges representing a sample derived from an entire population. Text has the value 0. diff --git a/docs/build/reference/transformer/Excel/Excel_STDEVP.md b/docs/build/reference/transformer/Excel/Excel_STDEVP.md index 277a15b5b..f5c29f08f 100644 --- a/docs/build/reference/transformer/Excel/Excel_STDEVP.md +++ b/docs/build/reference/transformer/Excel/Excel_STDEVP.md @@ -2,10 +2,12 @@ title: "Stdevp" description: "Excel STDEVP(number_1; number_2; ... number_30): Calculates the standard deviation based on the entire population. Number_1; number_2; ... number_30 are numerical values or ranges representing a sample based on an entire population." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Stdevp + Excel STDEVP(number_1; number_2; ... number_30): Calculates the standard deviation based on the entire population. Number_1; number_2; ... number_30 are numerical values or ranges representing a sample based on an entire population. 
diff --git a/docs/build/reference/transformer/Excel/Excel_STDEVPA.md b/docs/build/reference/transformer/Excel/Excel_STDEVPA.md index ad4117c01..1c0bc3881 100644 --- a/docs/build/reference/transformer/Excel/Excel_STDEVPA.md +++ b/docs/build/reference/transformer/Excel/Excel_STDEVPA.md @@ -2,10 +2,12 @@ title: "Stdevpa" description: "Excel STDEVPA(value_1; value_2; ... value_30): Calculates the standard deviation based on the entire population. Value_1; value_2; ... value_30 are values or ranges representing a sample derived from an entire population. Text has the value 0." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Stdevpa + Excel STDEVPA(value_1; value_2; ... value_30): Calculates the standard deviation based on the entire population. Value_1; value_2; ... value_30 are values or ranges representing a sample derived from an entire population. Text has the value 0. diff --git a/docs/build/reference/transformer/Excel/Excel_SUBSTITUTE.md b/docs/build/reference/transformer/Excel/Excel_SUBSTITUTE.md index dcea8157d..a0ed05a71 100644 --- a/docs/build/reference/transformer/Excel/Excel_SUBSTITUTE.md +++ b/docs/build/reference/transformer/Excel/Excel_SUBSTITUTE.md @@ -2,10 +2,12 @@ title: "Substitute" description: "Excel SUBSTITUTE(text; search_text; new text; occurrence): Substitutes new text for old text in a string. Text is the text in which text segments are to be exchanged. Search_text is the text segment that is to be replaced (a number of times). New text is the text that is to replace the text segment. Occurrence (optional) indicates how many occurrences of the search text are to be replaced. If this parameter is missing, the search text is replaced throughout." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Substitute + Excel SUBSTITUTE(text; search_text; new text; occurrence): Substitutes new text for old text in a string. Text is the text in which text segments are to be exchanged. 
Search_text is the text segment that is to be replaced (a number of times). New text is the text that is to replace the text segment. Occurrence (optional) indicates how many occurrences of the search text are to be replaced. If this parameter is missing, the search text is replaced throughout. diff --git a/docs/build/reference/transformer/Excel/Excel_SUM.md b/docs/build/reference/transformer/Excel/Excel_SUM.md index 70f590f38..84dcc9a8f 100644 --- a/docs/build/reference/transformer/Excel/Excel_SUM.md +++ b/docs/build/reference/transformer/Excel/Excel_SUM.md @@ -2,10 +2,12 @@ title: "Sum" description: "Excel SUM(number_1; number_2; ... number_30): Adds all the numbers in a range of cells. Number_1; number_2;... number_30 are up to 30 arguments whose sum is to be calculated. You can also enter a range using cell references." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Sum + Excel SUM(number_1; number_2; ... number_30): Adds all the numbers in a range of cells. Number_1; number_2;... number_30 are up to 30 arguments whose sum is to be calculated. You can also enter a range using cell references. diff --git a/docs/build/reference/transformer/Excel/Excel_SUMPRODUCT.md b/docs/build/reference/transformer/Excel/Excel_SUMPRODUCT.md index 9e5f86cf7..e39e0c5c1 100644 --- a/docs/build/reference/transformer/Excel/Excel_SUMPRODUCT.md +++ b/docs/build/reference/transformer/Excel/Excel_SUMPRODUCT.md @@ -2,10 +2,12 @@ title: "Sumproduct" description: "Excel SUMPRODUCT(array 1; array 2; ...array 30): Multiplies corresponding elements in the given arrays, and returns the sum of those products. Array 1; array 2;...array 30 are arrays whose corresponding elements are to be multiplied. At least one array must be part of the argument list. If only one array is given, all array elements are summed." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Sumproduct + Excel SUMPRODUCT(array 1; array 2; ...array 30): Multiplies corresponding elements in the given arrays, and returns the sum of those products. Array 1; array 2;...array 30 are arrays whose corresponding elements are to be multiplied. At least one array must be part of the argument list. If only one array is given, all array elements are summed. diff --git a/docs/build/reference/transformer/Excel/Excel_SUMSQ.md b/docs/build/reference/transformer/Excel/Excel_SUMSQ.md index 33143af95..c2e44b195 100644 --- a/docs/build/reference/transformer/Excel/Excel_SUMSQ.md +++ b/docs/build/reference/transformer/Excel/Excel_SUMSQ.md @@ -2,10 +2,12 @@ title: "Sumsq" description: "Excel SUMSQ(number_1; number_2; ... number_30): Calculates the sum of the squares of numbers (totaling up of the squares of the arguments) Number_1; number_2;... number_30 are up to 30 arguments, the sum of whose squares is to be calculated." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Sumsq + Excel SUMSQ(number_1; number_2; ... number_30): Calculates the sum of the squares of numbers (totaling up of the squares of the arguments) Number_1; number_2;... number_30 are up to 30 arguments, the sum of whose squares is to be calculated. diff --git a/docs/build/reference/transformer/Excel/Excel_SUMX2MY2.md b/docs/build/reference/transformer/Excel/Excel_SUMX2MY2.md index 9d43107e1..97ddedee2 100644 --- a/docs/build/reference/transformer/Excel/Excel_SUMX2MY2.md +++ b/docs/build/reference/transformer/Excel/Excel_SUMX2MY2.md @@ -2,10 +2,12 @@ title: "Sumx2my2" description: "Excel SUMX2MY2(array_X; array_Y): Returns the sum of the difference of squares of corresponding values in two arrays. Array_X is the first array whose elements are to be squared and added. Array_Y is the second array whose elements are to be squared and subtracted." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Sumx2my2 + Excel SUMX2MY2(array_X; array_Y): Returns the sum of the difference of squares of corresponding values in two arrays. Array_X is the first array whose elements are to be squared and added. Array_Y is the second array whose elements are to be squared and subtracted. diff --git a/docs/build/reference/transformer/Excel/Excel_SUMX2PY2.md b/docs/build/reference/transformer/Excel/Excel_SUMX2PY2.md index e18b10845..99228dece 100644 --- a/docs/build/reference/transformer/Excel/Excel_SUMX2PY2.md +++ b/docs/build/reference/transformer/Excel/Excel_SUMX2PY2.md @@ -2,10 +2,12 @@ title: "Sumx2py2" description: "Excel SUMX2PY2(array_X; array_Y): Returns the sum of the sum of squares of corresponding values in two arrays. Array_X is the first array whose arguments are to be squared and added. Array_Y is the second array, whose elements are to be added and squared." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Sumx2py2 + Excel SUMX2PY2(array_X; array_Y): Returns the sum of the sum of squares of corresponding values in two arrays. Array_X is the first array whose arguments are to be squared and added. Array_Y is the second array, whose elements are to be added and squared. diff --git a/docs/build/reference/transformer/Excel/Excel_SUMXMY2.md b/docs/build/reference/transformer/Excel/Excel_SUMXMY2.md index ae9d8068b..5e56544d0 100644 --- a/docs/build/reference/transformer/Excel/Excel_SUMXMY2.md +++ b/docs/build/reference/transformer/Excel/Excel_SUMXMY2.md @@ -2,10 +2,12 @@ title: "Sumxmy2" description: "Excel SUMXMY2(array_X; array_Y): Adds the squares of the variance between corresponding values in two arrays. Array_X is the first array whose elements are to be subtracted and squared. Array_Y is the second array, whose elements are to be subtracted and squared." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Sumxmy2 + Excel SUMXMY2(array_X; array_Y): Adds the squares of the variance between corresponding values in two arrays. Array_X is the first array whose elements are to be subtracted and squared. Array_Y is the second array, whose elements are to be subtracted and squared. diff --git a/docs/build/reference/transformer/Excel/Excel_TAN.md b/docs/build/reference/transformer/Excel/Excel_TAN.md index 86877770a..1ba9289fa 100644 --- a/docs/build/reference/transformer/Excel/Excel_TAN.md +++ b/docs/build/reference/transformer/Excel/Excel_TAN.md @@ -2,10 +2,12 @@ title: "Tan" description: "Excel TAN(number): Returns the tangent of the given number (angle in radians)." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Tan + Excel TAN(number): Returns the tangent of the given number (angle in radians). diff --git a/docs/build/reference/transformer/Excel/Excel_TANH.md b/docs/build/reference/transformer/Excel/Excel_TANH.md index aa17b935d..83ceda7c9 100644 --- a/docs/build/reference/transformer/Excel/Excel_TANH.md +++ b/docs/build/reference/transformer/Excel/Excel_TANH.md @@ -2,10 +2,12 @@ title: "Tanh" description: "Excel TANH(number): Returns the hyperbolic tangent of the given number (angle in radians)." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Tanh + Excel TANH(number): Returns the hyperbolic tangent of the given number (angle in radians). diff --git a/docs/build/reference/transformer/Excel/Excel_TDIST.md b/docs/build/reference/transformer/Excel/Excel_TDIST.md index 597ac7bf3..87bb47115 100644 --- a/docs/build/reference/transformer/Excel/Excel_TDIST.md +++ b/docs/build/reference/transformer/Excel/Excel_TDIST.md @@ -2,10 +2,12 @@ title: "Tdist" description: "Excel TDIST(number; degrees_freedom; mode): Returns the t-distribution for the given Number. Degrees_freedom is the number of degrees of freedom for the t-distribution. 
Mode = 1 returns the one-tailed test, Mode = 2 returns the two-tailed test." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Tdist + Excel TDIST(number; degrees_freedom; mode): Returns the t-distribution for the given Number. Degrees_freedom is the number of degrees of freedom for the t-distribution. Mode = 1 returns the one-tailed test, Mode = 2 returns the two-tailed test. diff --git a/docs/build/reference/transformer/Excel/Excel_TRUE.md b/docs/build/reference/transformer/Excel/Excel_TRUE.md index 2e7d3f0ad..897a851b4 100644 --- a/docs/build/reference/transformer/Excel/Excel_TRUE.md +++ b/docs/build/reference/transformer/Excel/Excel_TRUE.md @@ -2,10 +2,12 @@ title: "True" description: "Excel TRUE(): Sets the logical value to TRUE. The TRUE() function does not require any arguments." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # True + Excel TRUE(): Sets the logical value to TRUE. The TRUE() function does not require any arguments. diff --git a/docs/build/reference/transformer/Excel/Excel_TRUNC.md b/docs/build/reference/transformer/Excel/Excel_TRUNC.md index 9720f9040..df12eb72e 100644 --- a/docs/build/reference/transformer/Excel/Excel_TRUNC.md +++ b/docs/build/reference/transformer/Excel/Excel_TRUNC.md @@ -2,10 +2,12 @@ title: "Trunc" description: "Excel TRUNC(number; count): Truncates a number to an integer by removing the fractional part of the number according to the precision specified in Tools > Options > OpenOffice.org Calc > Calculate. Number is the number whose decimal places are to be cut off. Count is the number of decimal places which are not cut off." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Trunc + Excel TRUNC(number; count): Truncates a number to an integer by removing the fractional part of the number according to the precision specified in Tools > Options > OpenOffice.org Calc > Calculate. Number is the number whose decimal places are to be cut off. 
Count is the number of decimal places which are not cut off. diff --git a/docs/build/reference/transformer/Excel/Excel_VAR.md b/docs/build/reference/transformer/Excel/Excel_VAR.md index 044904b53..421896627 100644 --- a/docs/build/reference/transformer/Excel/Excel_VAR.md +++ b/docs/build/reference/transformer/Excel/Excel_VAR.md @@ -2,10 +2,12 @@ title: "Var" description: "Excel VAR(number_1; number_2; ... number_30): Estimates the variance based on a sample. Number_1; number_2; ... number_30 are numerical values or ranges representing a sample based on an entire population." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Var + Excel VAR(number_1; number_2; ... number_30): Estimates the variance based on a sample. Number_1; number_2; ... number_30 are numerical values or ranges representing a sample based on an entire population. diff --git a/docs/build/reference/transformer/Excel/Excel_VARA.md b/docs/build/reference/transformer/Excel/Excel_VARA.md index 2bc082cbb..80bb2cd5a 100644 --- a/docs/build/reference/transformer/Excel/Excel_VARA.md +++ b/docs/build/reference/transformer/Excel/Excel_VARA.md @@ -2,10 +2,12 @@ title: "Vara" description: "Excel VARA(value_1; value_2; ... value_30): Estimates a variance based on a sample. The value of text is 0. Value_1; value_2; ... value_30 are values or ranges representing a sample derived from an entire population. Text has the value 0." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Vara + Excel VARA(value_1; value_2; ... value_30): Estimates a variance based on a sample. The value of text is 0. Value_1; value_2; ... value_30 are values or ranges representing a sample derived from an entire population. Text has the value 0. 
diff --git a/docs/build/reference/transformer/Excel/Excel_VARP.md b/docs/build/reference/transformer/Excel/Excel_VARP.md index 76d2e0987..7de7a39b2 100644 --- a/docs/build/reference/transformer/Excel/Excel_VARP.md +++ b/docs/build/reference/transformer/Excel/Excel_VARP.md @@ -2,10 +2,12 @@ title: "Varp" description: "Excel VARP(Number_1; number_2; ... number_30): Calculates a variance based on the entire population. Number_1; number_2; ... number_30 are numerical values or ranges representing an entire population." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Varp + Excel VARP(Number_1; number_2; ... number_30): Calculates a variance based on the entire population. Number_1; number_2; ... number_30 are numerical values or ranges representing an entire population. diff --git a/docs/build/reference/transformer/Excel/Excel_VARPA.md b/docs/build/reference/transformer/Excel/Excel_VARPA.md index 45f7f9211..5a4198c61 100644 --- a/docs/build/reference/transformer/Excel/Excel_VARPA.md +++ b/docs/build/reference/transformer/Excel/Excel_VARPA.md @@ -2,10 +2,12 @@ title: "Varpa" description: "Excel VARPA(value_1; value_2; .. .value_30): Calculates the variance based on the entire population. The value of text is 0. Value_1; value_2; ... value_30 are values or ranges representing an entire population." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Varpa + Excel VARPA(value_1; value_2; .. .value_30): Calculates the variance based on the entire population. The value of text is 0. Value_1; value_2; ... value_30 are values or ranges representing an entire population. 
diff --git a/docs/build/reference/transformer/Extract/regexExtract.md b/docs/build/reference/transformer/Extract/regexExtract.md index ea3c01c50..df74e9d55 100644 --- a/docs/build/reference/transformer/Extract/regexExtract.md +++ b/docs/build/reference/transformer/Extract/regexExtract.md @@ -2,10 +2,12 @@ title: "Regex extract" description: "Extracts one or all matches of a regular expression within the input. If the regular expression contains one or more capturing groups, only the first group will be considered." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Regex extract + ## Description diff --git a/docs/build/reference/transformer/Filter/filterByLength.md b/docs/build/reference/transformer/Filter/filterByLength.md index 3de8bffb5..a07a90a79 100644 --- a/docs/build/reference/transformer/Filter/filterByLength.md +++ b/docs/build/reference/transformer/Filter/filterByLength.md @@ -2,10 +2,12 @@ title: "Filter by length" description: "Removes all strings that are shorter than 'min' characters and longer than 'max' characters." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Filter by length + Removes all strings that are shorter than 'min' characters and longer than 'max' characters. diff --git a/docs/build/reference/transformer/Filter/filterByRegex.md b/docs/build/reference/transformer/Filter/filterByRegex.md index d0809068c..60b2b050c 100644 --- a/docs/build/reference/transformer/Filter/filterByRegex.md +++ b/docs/build/reference/transformer/Filter/filterByRegex.md @@ -2,10 +2,12 @@ title: "Filter by regex" description: "Removes all strings that do NOT match a regex. If 'negate' is true, only strings will be removed that match the regex." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Filter by regex + Removes all strings that do NOT match a regex. If 'negate' is true, only strings will be removed that match the regex. 
diff --git a/docs/build/reference/transformer/Filter/removeDefaultStopWords.md b/docs/build/reference/transformer/Filter/removeDefaultStopWords.md index 6ce3c287b..bf0cbb76a 100644 --- a/docs/build/reference/transformer/Filter/removeDefaultStopWords.md +++ b/docs/build/reference/transformer/Filter/removeDefaultStopWords.md @@ -2,10 +2,12 @@ title: "Remove default stop words" description: "Removes stop words based on a default stop word list." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Remove default stop words + This stop word list filter uses the following diff --git a/docs/build/reference/transformer/Filter/removeEmptyValues.md b/docs/build/reference/transformer/Filter/removeEmptyValues.md index bc3f84cc8..8887d3e11 100644 --- a/docs/build/reference/transformer/Filter/removeEmptyValues.md +++ b/docs/build/reference/transformer/Filter/removeEmptyValues.md @@ -2,10 +2,12 @@ title: "Remove empty values" description: "Removes empty values." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Remove empty values + Removes empty values. diff --git a/docs/build/reference/transformer/Filter/removeRemoteStopWords.md b/docs/build/reference/transformer/Filter/removeRemoteStopWords.md index 0cbd4217f..79fbf6425 100644 --- a/docs/build/reference/transformer/Filter/removeRemoteStopWords.md +++ b/docs/build/reference/transformer/Filter/removeRemoteStopWords.md @@ -2,10 +2,12 @@ title: "Remove remote stop words" description: "Removes stop words based on a stop word list remote URL." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Remove remote stop words + The stop word list is retrieved from a remote URL such as diff --git a/docs/build/reference/transformer/Filter/removeStopWords.md b/docs/build/reference/transformer/Filter/removeStopWords.md index 488941dd0..fb2d67793 100644 --- a/docs/build/reference/transformer/Filter/removeStopWords.md +++ b/docs/build/reference/transformer/Filter/removeStopWords.md @@ -2,10 +2,12 @@ title: "Remove stop words" description: "Removes stop words based on a stop word list resource." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Remove stop words + The stop word list is specified as a resource, e.g. a file identical to diff --git a/docs/build/reference/transformer/Filter/removeValues.md b/docs/build/reference/transformer/Filter/removeValues.md index 2683f0760..38da8eb2f 100644 --- a/docs/build/reference/transformer/Filter/removeValues.md +++ b/docs/build/reference/transformer/Filter/removeValues.md @@ -2,10 +2,12 @@ title: "Remove values" description: "Removes values that contain words from a blacklist. The blacklist values are separated with commas." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Remove values + Removes values that contain words from a blacklist. The blacklist values are separated with commas. diff --git a/docs/build/reference/transformer/Geo/RetrieveCoordinates.md b/docs/build/reference/transformer/Geo/RetrieveCoordinates.md index e602faa05..aec2fe0a0 100644 --- a/docs/build/reference/transformer/Geo/RetrieveCoordinates.md +++ b/docs/build/reference/transformer/Geo/RetrieveCoordinates.md @@ -2,10 +2,12 @@ title: "Retrieve coordinates" description: "Retrieves geographic coordinates using Nominatim." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Retrieve coordinates + **Configuration** @@ -18,18 +20,18 @@ The default configuration is as follows: # url = "https://nominatim.eccenca.com/search" url = "https://photon.komoot.de/api" # url = https://api-adresse.data.gouv.fr/search - + # Additional URL parameters to be attached to all HTTP search requests. Example: '&countrycodes=de&addressdetails=1'. # Will be attached in addition to the parameters set on each search operator directly. searchParameters = "" - + # The minimum pause time between subsequent queries pauseTime = 1s - + # Number of coordinates to be cached in-memory cacheSize = 10 } - + In general, all services adhering to the [Nominatim search API](https://nominatim.org/release-docs/develop/api/Search/) should be usable. Please note that when using public services, the pause time should be set to avoid overloading. diff --git a/docs/build/reference/transformer/Geo/RetrieveLatitude.md b/docs/build/reference/transformer/Geo/RetrieveLatitude.md index ca80644da..5f4dc19a4 100644 --- a/docs/build/reference/transformer/Geo/RetrieveLatitude.md +++ b/docs/build/reference/transformer/Geo/RetrieveLatitude.md @@ -2,10 +2,12 @@ title: "Retrieve latitude" description: "Retrieves geographic coordinates using Nominatim and returns the latitude." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Retrieve latitude + **Configuration** @@ -18,18 +20,18 @@ The default configuration is as follows: # url = "https://nominatim.eccenca.com/search" url = "https://photon.komoot.de/api" # url = https://api-adresse.data.gouv.fr/search - + # Additional URL parameters to be attached to all HTTP search requests. Example: '&countrycodes=de&addressdetails=1'. # Will be attached in addition to the parameters set on each search operator directly. 
searchParameters = "" - + # The minimum pause time between subsequent queries pauseTime = 1s - + # Number of coordinates to be cached in-memory cacheSize = 10 } - + In general, all services adhering to the [Nominatim search API](https://nominatim.org/release-docs/develop/api/Search/) should be usable. Please note that when using public services, the pause time should be set to avoid overloading. diff --git a/docs/build/reference/transformer/Geo/RetrieveLongitude.md b/docs/build/reference/transformer/Geo/RetrieveLongitude.md index 8da180fbe..39184c9c3 100644 --- a/docs/build/reference/transformer/Geo/RetrieveLongitude.md +++ b/docs/build/reference/transformer/Geo/RetrieveLongitude.md @@ -2,10 +2,12 @@ title: "Retrieve longitude" description: "Retrieves geographic coordinates using Nominatim and returns the longitude." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Retrieve longitude + **Configuration** @@ -18,18 +20,18 @@ The default configuration is as follows: # url = "https://nominatim.eccenca.com/search" url = "https://photon.komoot.de/api" # url = https://api-adresse.data.gouv.fr/search - + # Additional URL parameters to be attached to all HTTP search requests. Example: '&countrycodes=de&addressdetails=1'. # Will be attached in addition to the parameters set on each search operator directly. searchParameters = "" - + # The minimum pause time between subsequent queries pauseTime = 1s - + # Number of coordinates to be cached in-memory cacheSize = 10 } - + In general, all services adhering to the [Nominatim search API](https://nominatim.org/release-docs/develop/api/Search/) should be usable. Please note that when using public services, the pause time should be set to avoid overloading. 
diff --git a/docs/build/reference/transformer/Linguistic/NYSIIS.md b/docs/build/reference/transformer/Linguistic/NYSIIS.md index 9c0a78112..d0dc161de 100644 --- a/docs/build/reference/transformer/Linguistic/NYSIIS.md +++ b/docs/build/reference/transformer/Linguistic/NYSIIS.md @@ -2,10 +2,12 @@ title: "NYSIIS" description: "NYSIIS phonetic encoding." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # NYSIIS + This transformer plugin implements the **NYSIIS** phonetic algorithm for encoding names. diff --git a/docs/build/reference/transformer/Linguistic/metaphone.md b/docs/build/reference/transformer/Linguistic/metaphone.md index 969a179db..cd89d195f 100644 --- a/docs/build/reference/transformer/Linguistic/metaphone.md +++ b/docs/build/reference/transformer/Linguistic/metaphone.md @@ -2,10 +2,12 @@ title: "Metaphone" description: "Metaphone phonetic encoding." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Metaphone + This transformer plugin implements the **Metaphone** phonetic algorithm for indexing words according to English. diff --git a/docs/build/reference/transformer/Linguistic/normalizeChars.md b/docs/build/reference/transformer/Linguistic/normalizeChars.md index af22a0c11..019fe58ab 100644 --- a/docs/build/reference/transformer/Linguistic/normalizeChars.md +++ b/docs/build/reference/transformer/Linguistic/normalizeChars.md @@ -2,10 +2,12 @@ title: "Normalize chars" description: "Replaces diacritical characters with non-diacritical ones (eg, ö -> o), plus some specialities like transforming æ -> ae, ß -> ss." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Normalize chars + Replaces diacritical characters with non-diacritical ones (eg, ö -> o), plus some specialities like transforming æ -> ae, ß -> ss. 
diff --git a/docs/build/reference/transformer/Linguistic/soundex.md b/docs/build/reference/transformer/Linguistic/soundex.md index 7098c3a83..1d499de74 100644 --- a/docs/build/reference/transformer/Linguistic/soundex.md +++ b/docs/build/reference/transformer/Linguistic/soundex.md @@ -2,10 +2,12 @@ title: "Soundex" description: "Soundex algorithm." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Soundex + This transformer plugin implements the **Soundex** phonetic algorithm for indexing names by their English sounds. diff --git a/docs/build/reference/transformer/Linguistic/stem.md b/docs/build/reference/transformer/Linguistic/stem.md index d6b1f804e..7c5ba92e7 100644 --- a/docs/build/reference/transformer/Linguistic/stem.md +++ b/docs/build/reference/transformer/Linguistic/stem.md @@ -2,10 +2,12 @@ title: "Stem" description: "Stems a string using the Porter Stemmer." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Stem + Stems a string using the Porter Stemmer. diff --git a/docs/build/reference/transformer/Metadata/fileHash.md b/docs/build/reference/transformer/Metadata/fileHash.md index e06cdbb8f..bdd09ea06 100644 --- a/docs/build/reference/transformer/Metadata/fileHash.md +++ b/docs/build/reference/transformer/Metadata/fileHash.md @@ -2,10 +2,12 @@ title: "File hash" description: "Calculates the hash sum of a file. The hash sum is cached so that subsequent calls to this operator are fast. Note that initially and every time the specified resource has been updated, this operator might take a long time (depending on the file size). This operator supports using different hash algorithms from the [Secure Hash Algorithms family](https://en.wikipedia.org/wiki/Secure_Hash_Algorithms) (SHA, e.g. SHA256) and two algorithms from the [Message-Digest Algorithm family](https://en.wikipedia.org/wiki/MD5) (MD2 / MD5). Please be aware that some of these algorithms are not secure regarding collision- and other attacks. 
Note: This transform operator ignores any inputs." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # File hash + Calculates the hash sum of a file. The hash sum is cached so that subsequent calls to this operator are fast. diff --git a/docs/build/reference/transformer/Metadata/inputFileAttributes.md b/docs/build/reference/transformer/Metadata/inputFileAttributes.md index 43f8ae92b..fff1b72bd 100644 --- a/docs/build/reference/transformer/Metadata/inputFileAttributes.md +++ b/docs/build/reference/transformer/Metadata/inputFileAttributes.md @@ -2,10 +2,12 @@ title: "Input file attributes" description: "Retrieves a metadata attribute from the input file (such as the file name)." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Input file attributes + Retrieves a metadata attribute from the input file (such as the file name). diff --git a/docs/build/reference/transformer/Metadata/inputTaskAttributes.md b/docs/build/reference/transformer/Metadata/inputTaskAttributes.md index 0e89098f4..3af86afaf 100644 --- a/docs/build/reference/transformer/Metadata/inputTaskAttributes.md +++ b/docs/build/reference/transformer/Metadata/inputTaskAttributes.md @@ -2,10 +2,12 @@ title: "Input task attributes" description: "Retrieves individual attributes from the input task (such as the modified date) or the entire task as JSON." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Input task attributes + Retrieves individual attributes from the input task (such as the modified date) or the entire task as JSON. 
diff --git a/docs/build/reference/transformer/Normalize/alphaReduce.md b/docs/build/reference/transformer/Normalize/alphaReduce.md index d6d2db8f0..a41d2d44b 100644 --- a/docs/build/reference/transformer/Normalize/alphaReduce.md +++ b/docs/build/reference/transformer/Normalize/alphaReduce.md @@ -2,10 +2,12 @@ title: "Strip non-alphabetic characters" description: "Strips all non-alphabetic characters from a string. Spaces are retained." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Strip non-alphabetic characters + Strips all non-alphabetic characters from a string. Spaces are retained. diff --git a/docs/build/reference/transformer/Normalize/camelCase.md b/docs/build/reference/transformer/Normalize/camelCase.md index c6fda5b29..4442a1279 100644 --- a/docs/build/reference/transformer/Normalize/camelCase.md +++ b/docs/build/reference/transformer/Normalize/camelCase.md @@ -2,10 +2,12 @@ title: "Camel case" description: "Converts a string to camel case. Upper camel case is the default, lower camel case can be chosen." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Camel case + Converts a string to camel case. Upper camel case is the default, lower camel case can be chosen. diff --git a/docs/build/reference/transformer/Normalize/capitalize.md b/docs/build/reference/transformer/Normalize/capitalize.md index 7e0375542..d831d6a9e 100644 --- a/docs/build/reference/transformer/Normalize/capitalize.md +++ b/docs/build/reference/transformer/Normalize/capitalize.md @@ -2,10 +2,12 @@ title: "Capitalize" description: "Capitalizes the string i.e. converts the first character to upper case. If 'allWords' is set to true, all words are capitalized and not only the first character." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Capitalize + Capitalizes the string i.e. converts the first character to upper case. 
If 'allWords' is set to true, all words are capitalized and not only the first character. diff --git a/docs/build/reference/transformer/Normalize/htmlCleaner.md b/docs/build/reference/transformer/Normalize/htmlCleaner.md index be7ebfdc2..b67284afb 100644 --- a/docs/build/reference/transformer/Normalize/htmlCleaner.md +++ b/docs/build/reference/transformer/Normalize/htmlCleaner.md @@ -2,10 +2,12 @@ title: "Clean HTML" description: "Cleans HTML markup using a tag whitelist and allows selection of HTML sections with XPath or CSS selector expressions." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Clean HTML + This transformer cleans HTML markup using a whitelist of HTML tags. diff --git a/docs/build/reference/transformer/Normalize/lowerCase.md b/docs/build/reference/transformer/Normalize/lowerCase.md index 4ea9075dc..1ac6f950a 100644 --- a/docs/build/reference/transformer/Normalize/lowerCase.md +++ b/docs/build/reference/transformer/Normalize/lowerCase.md @@ -2,10 +2,12 @@ title: "Lower case" description: "Converts a string to lower case." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Lower case + Converts a string to lower case. diff --git a/docs/build/reference/transformer/Normalize/removeBlanks.md b/docs/build/reference/transformer/Normalize/removeBlanks.md index 02c40bf25..81e613d6e 100644 --- a/docs/build/reference/transformer/Normalize/removeBlanks.md +++ b/docs/build/reference/transformer/Normalize/removeBlanks.md @@ -2,10 +2,12 @@ title: "Remove blanks" description: "Remove whitespace from a string." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Remove blanks + Remove whitespace from a string. 
diff --git a/docs/build/reference/transformer/Normalize/removeDuplicates.md b/docs/build/reference/transformer/Normalize/removeDuplicates.md index e21b68c46..c88d43e93 100644 --- a/docs/build/reference/transformer/Normalize/removeDuplicates.md +++ b/docs/build/reference/transformer/Normalize/removeDuplicates.md @@ -2,10 +2,12 @@ title: "Remove duplicates" description: "Removes duplicated values, making a value sequence distinct." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Remove duplicates + Removes duplicated values, making a value sequence distinct. diff --git a/docs/build/reference/transformer/Normalize/removeParentheses.md b/docs/build/reference/transformer/Normalize/removeParentheses.md index 3899f13b3..61cc79ab0 100644 --- a/docs/build/reference/transformer/Normalize/removeParentheses.md +++ b/docs/build/reference/transformer/Normalize/removeParentheses.md @@ -2,10 +2,12 @@ title: "Remove parentheses" description: "Remove all parentheses including their content, e.g., transforms 'Berlin (City)' -> 'Berlin'." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Remove parentheses + Remove all parentheses including their content, e.g., transforms 'Berlin (City)' -> 'Berlin'. diff --git a/docs/build/reference/transformer/Normalize/removeSpecialChars.md b/docs/build/reference/transformer/Normalize/removeSpecialChars.md index 167519ec6..9007a7638 100644 --- a/docs/build/reference/transformer/Normalize/removeSpecialChars.md +++ b/docs/build/reference/transformer/Normalize/removeSpecialChars.md @@ -2,10 +2,12 @@ title: "Remove special chars" description: "Remove special characters (including punctuation) from a string." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Remove special chars + Remove special characters (including punctuation) from a string. 
diff --git a/docs/build/reference/transformer/Normalize/sortWords.md b/docs/build/reference/transformer/Normalize/sortWords.md index 64f602876..f664e0dcd 100644 --- a/docs/build/reference/transformer/Normalize/sortWords.md +++ b/docs/build/reference/transformer/Normalize/sortWords.md @@ -2,10 +2,12 @@ title: "Sort words" description: "Sorts all words in each value lexicographically." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Sort words + Sorts all words in each value lexicographically. @@ -54,7 +56,7 @@ Separator to be inserted between sorted words. * ID: `glue` * Datatype: `string` -* Default Value: ` ` +* Default Value: `` ## Advanced Parameter diff --git a/docs/build/reference/transformer/Normalize/trim.md b/docs/build/reference/transformer/Normalize/trim.md index 4720e74c2..bcdc75f3f 100644 --- a/docs/build/reference/transformer/Normalize/trim.md +++ b/docs/build/reference/transformer/Normalize/trim.md @@ -2,10 +2,12 @@ title: "Trim" description: "Remove leading and trailing whitespaces." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Trim + Remove leading and trailing whitespaces. diff --git a/docs/build/reference/transformer/Normalize/upperCase.md b/docs/build/reference/transformer/Normalize/upperCase.md index 3f30b7aa5..d77e56a96 100644 --- a/docs/build/reference/transformer/Normalize/upperCase.md +++ b/docs/build/reference/transformer/Normalize/upperCase.md @@ -2,10 +2,12 @@ title: "Upper case" description: "Converts a string to upper case." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Upper case + Converts a string to upper case. 
diff --git a/docs/build/reference/transformer/Normalize/uriFix.md b/docs/build/reference/transformer/Normalize/uriFix.md index b5cc4c46a..b3c5ebcb2 100644 --- a/docs/build/reference/transformer/Normalize/uriFix.md +++ b/docs/build/reference/transformer/Normalize/uriFix.md @@ -2,10 +2,12 @@ title: "Fix URI" description: "Generates valid absolute URIs from the given values. Already valid absolute URIs are left untouched." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Fix URI + Generates valid absolute URIs from the given values. Already valid absolute URIs are left untouched. diff --git a/docs/build/reference/transformer/Normalize/urlEncode.md b/docs/build/reference/transformer/Normalize/urlEncode.md index 798116325..86b1490af 100644 --- a/docs/build/reference/transformer/Normalize/urlEncode.md +++ b/docs/build/reference/transformer/Normalize/urlEncode.md @@ -2,10 +2,12 @@ title: "Encode URL" description: "URL encodes the string." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Encode URL + URL encodes the string. diff --git a/docs/build/reference/transformer/Numeric/PhysicalQuantitiesNormalizer.md b/docs/build/reference/transformer/Numeric/PhysicalQuantitiesNormalizer.md index ac3a7e90b..c89a3033c 100644 --- a/docs/build/reference/transformer/Numeric/PhysicalQuantitiesNormalizer.md +++ b/docs/build/reference/transformer/Numeric/PhysicalQuantitiesNormalizer.md @@ -2,10 +2,12 @@ title: "Normalize physical quantity" description: "Normalizes physical quantities. By default, all quantities are normalized to their base unit (SI), which is overridable. For instance, lengths will be normalized to metres by default." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Normalize physical quantity + This transformer normalizes physical quantities. 
diff --git a/docs/build/reference/transformer/Numeric/aggregateNumbers.md b/docs/build/reference/transformer/Numeric/aggregateNumbers.md index 3aa45d389..e4316fb23 100644 --- a/docs/build/reference/transformer/Numeric/aggregateNumbers.md +++ b/docs/build/reference/transformer/Numeric/aggregateNumbers.md @@ -2,10 +2,12 @@ title: "Aggregate numbers" description: "Applies one of the aggregation operators (`+`, `*`, `min`, `max` or `average`) to the sequence of input values." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Aggregate numbers + The `aggregateNumbers` plugin applies an aggregation operator to the sequence of input values. diff --git a/docs/build/reference/transformer/Numeric/cmem-plugin-number-conversion.md b/docs/build/reference/transformer/Numeric/cmem-plugin-number-conversion.md index b753089e7..d138b2df7 100644 --- a/docs/build/reference/transformer/Numeric/cmem-plugin-number-conversion.md +++ b/docs/build/reference/transformer/Numeric/cmem-plugin-number-conversion.md @@ -2,11 +2,13 @@ title: "Convert Number Base" description: "Convert numbers between different number bases (binary, octal, decimal, hexadecimal)." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator - PythonPlugin --- + # Convert Number Base + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/transformer/Numeric/compareNumbers.md b/docs/build/reference/transformer/Numeric/compareNumbers.md index 470653f60..791b3ef24 100644 --- a/docs/build/reference/transformer/Numeric/compareNumbers.md +++ b/docs/build/reference/transformer/Numeric/compareNumbers.md @@ -2,10 +2,12 @@ title: "Compare numbers" description: "Compares the numbers of two sets. Returns 1 if the comparison yields true and 0 otherwise. If there are multiple numbers in both sets, the comparator must be true for all numbers. For instance, {1,2} < {2,3} yields 0 as not all numbers in the first set are smaller than in the second." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Compare numbers + Compares the numbers of two sets. diff --git a/docs/build/reference/transformer/Numeric/extractPhysicalQuantity.md b/docs/build/reference/transformer/Numeric/extractPhysicalQuantity.md index 39d70530e..f3358672d 100644 --- a/docs/build/reference/transformer/Numeric/extractPhysicalQuantity.md +++ b/docs/build/reference/transformer/Numeric/extractPhysicalQuantity.md @@ -2,10 +2,12 @@ title: "Extract physical quantity" description: "Extracts physical quantities, such as length or weight values. Values are expected to be formatted as `{Number}{UnitPrefix}{Symbol}` and are converted to the base unit." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Extract physical quantity + Extracts physical quantities, such as length or weight values. diff --git a/docs/build/reference/transformer/Numeric/formatNumber.md b/docs/build/reference/transformer/Numeric/formatNumber.md index aa2397b05..b8cf84a3c 100644 --- a/docs/build/reference/transformer/Numeric/formatNumber.md +++ b/docs/build/reference/transformer/Numeric/formatNumber.md @@ -2,10 +2,12 @@ title: "Format number" description: "Formats a number according to a user-defined pattern. The pattern syntax is documented at: https://docs.oracle.com/javase/8/docs/api/java/text/DecimalFormat.html" icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Format number + Formats a number according to a user-defined pattern. diff --git a/docs/build/reference/transformer/Numeric/log.md b/docs/build/reference/transformer/Numeric/log.md index 9cd328193..e30c1cbd0 100644 --- a/docs/build/reference/transformer/Numeric/log.md +++ b/docs/build/reference/transformer/Numeric/log.md @@ -2,10 +2,12 @@ title: "Logarithm" description: "Transforms all numbers by applying the logarithm function. Non-numeric values are left unchanged." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Logarithm + Transforms all numbers by applying the logarithm function. Non-numeric values are left unchanged. diff --git a/docs/build/reference/transformer/Numeric/numOperation.md b/docs/build/reference/transformer/Numeric/numOperation.md index 245c6e58e..d8a93c7ce 100644 --- a/docs/build/reference/transformer/Numeric/numOperation.md +++ b/docs/build/reference/transformer/Numeric/numOperation.md @@ -2,10 +2,12 @@ title: "Numeric operation" description: "Applies one of the four basic arithmetic operators to the sequence of input values." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Numeric operation + The `numOperation` plugin applies one of the four basic arithmetic operators to the sequence of input values. diff --git a/docs/build/reference/transformer/Numeric/numReduce.md b/docs/build/reference/transformer/Numeric/numReduce.md index 7c15ceb1c..3372a2641 100644 --- a/docs/build/reference/transformer/Numeric/numReduce.md +++ b/docs/build/reference/transformer/Numeric/numReduce.md @@ -2,10 +2,12 @@ title: "Numeric reduce" description: "Strip all non-numeric characters from a string." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Numeric reduce + Strip all non-numeric characters from a string. diff --git a/docs/build/reference/transformer/Parser/DateTypeParser.md b/docs/build/reference/transformer/Parser/DateTypeParser.md index 67b152178..7e61fab11 100644 --- a/docs/build/reference/transformer/Parser/DateTypeParser.md +++ b/docs/build/reference/transformer/Parser/DateTypeParser.md @@ -2,10 +2,12 @@ title: "Parse date" description: "Parses and normalizes dates in different formats." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Parse date + Parses and normalizes dates in different formats. 
diff --git a/docs/build/reference/transformer/Parser/FloatTypeParser.md b/docs/build/reference/transformer/Parser/FloatTypeParser.md index 2e47c616c..b53b494cc 100644 --- a/docs/build/reference/transformer/Parser/FloatTypeParser.md +++ b/docs/build/reference/transformer/Parser/FloatTypeParser.md @@ -2,10 +2,12 @@ title: "Parse float" description: "Parses and normalizes float values." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Parse float + Parses and normalizes float values. diff --git a/docs/build/reference/transformer/Parser/GeoCoordinateParser.md b/docs/build/reference/transformer/Parser/GeoCoordinateParser.md index fc3ecc2f7..025608701 100644 --- a/docs/build/reference/transformer/Parser/GeoCoordinateParser.md +++ b/docs/build/reference/transformer/Parser/GeoCoordinateParser.md @@ -2,10 +2,12 @@ title: "Parse geo coordinate" description: "Parses and normalizes geo coordinates." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Parse geo coordinate + Parses and normalizes geo coordinates. diff --git a/docs/build/reference/transformer/Parser/GeoLocationParser.md b/docs/build/reference/transformer/Parser/GeoLocationParser.md index 0cd11cfa7..669c80e21 100644 --- a/docs/build/reference/transformer/Parser/GeoLocationParser.md +++ b/docs/build/reference/transformer/Parser/GeoLocationParser.md @@ -2,10 +2,12 @@ title: "Parse geo location" description: "Parses and normalizes geo locations like continents, countries, states and cities." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Parse geo location + Parses and normalizes geo locations like continents, countries, states and cities. 
diff --git a/docs/build/reference/transformer/Parser/IntegerParser.md b/docs/build/reference/transformer/Parser/IntegerParser.md index 7f37f0098..72923498e 100644 --- a/docs/build/reference/transformer/Parser/IntegerParser.md +++ b/docs/build/reference/transformer/Parser/IntegerParser.md @@ -2,10 +2,12 @@ title: "Parse integer" description: "Parses integer values." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Parse integer + Parses integer values. diff --git a/docs/build/reference/transformer/Parser/IsinParser.md b/docs/build/reference/transformer/Parser/IsinParser.md index 40c264b65..9e8a759eb 100644 --- a/docs/build/reference/transformer/Parser/IsinParser.md +++ b/docs/build/reference/transformer/Parser/IsinParser.md @@ -2,10 +2,12 @@ title: "Parse ISIN" description: "Parses International Securities Identification Numbers (ISIN) values and fails if the String is no valid ISIN." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Parse ISIN + Parses International Securities Identification Numbers (ISIN) values and fails if the String is no valid ISIN. diff --git a/docs/build/reference/transformer/Parser/SkosTypeParser.md b/docs/build/reference/transformer/Parser/SkosTypeParser.md index 658bd36bd..d9725b999 100644 --- a/docs/build/reference/transformer/Parser/SkosTypeParser.md +++ b/docs/build/reference/transformer/Parser/SkosTypeParser.md @@ -2,10 +2,12 @@ title: "Parse SKOS term" description: "Parses values from a SKOS ontology." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Parse SKOS term + Parses values from a SKOS ontology. 
diff --git a/docs/build/reference/transformer/Parser/StringParser.md b/docs/build/reference/transformer/Parser/StringParser.md index df6c8f0f6..7369687e8 100644 --- a/docs/build/reference/transformer/Parser/StringParser.md +++ b/docs/build/reference/transformer/Parser/StringParser.md @@ -2,10 +2,12 @@ title: "Parse string" description: "Parses string values. This is basically an identity function." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Parse string + Parses string values. This is basically an identity function. diff --git a/docs/build/reference/transformer/Replace/excelMap.md b/docs/build/reference/transformer/Replace/excelMap.md index 2d2629b09..047c936c7 100644 --- a/docs/build/reference/transformer/Replace/excelMap.md +++ b/docs/build/reference/transformer/Replace/excelMap.md @@ -2,10 +2,12 @@ title: "Excel map" description: "Replaces values based on a map of values read from a file in Open XML format (XLSX). The XLSX file may contain several sheets of the form: mapFrom,mapTo , ... and more An empty string can be created in Excel and alternatives by inserting `=''` in the input line of a cell. If there are multiple values for a single key, all values will be returned for the given key. Note that the mapping table will be cached in memory. If the Excel file is updated (even while transforming), the map will be reloaded within seconds." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Excel map + Replaces values based on a map of values read from a file in Open XML format (XLSX). diff --git a/docs/build/reference/transformer/Replace/map.md b/docs/build/reference/transformer/Replace/map.md index f71f0837d..0c6721345 100644 --- a/docs/build/reference/transformer/Replace/map.md +++ b/docs/build/reference/transformer/Replace/map.md @@ -2,10 +2,12 @@ title: "Map" description: "Replaces values based on a map of values." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Map + Replaces values based on a map of values. diff --git a/docs/build/reference/transformer/Replace/mapWithDefaultInput.md b/docs/build/reference/transformer/Replace/mapWithDefaultInput.md index bdd85dd03..c9b8c0e9c 100644 --- a/docs/build/reference/transformer/Replace/mapWithDefaultInput.md +++ b/docs/build/reference/transformer/Replace/mapWithDefaultInput.md @@ -2,10 +2,12 @@ title: "Map with default" description: "Maps input values from the first input using a predefined map, with fallback to default values provided by the second input." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Map with default + This transformer requires a _map of values_, when created. This can be a map such as `"A:1,B:2,C:3"`, representing the mapping between the first three letters and the corresponding numbers (i.e. `A` to `1`, `B` to `2` and `C` to `3`). diff --git a/docs/build/reference/transformer/Replace/regexReplace.md b/docs/build/reference/transformer/Replace/regexReplace.md index a8ef1a75a..325dfb829 100644 --- a/docs/build/reference/transformer/Replace/regexReplace.md +++ b/docs/build/reference/transformer/Replace/regexReplace.md @@ -2,10 +2,12 @@ title: "Regex replace" description: "Replace all occurrences of a regular expression in a string. If no replacement is given, the occurrences of the regular expression will be deleted." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Regex replace + ## Description diff --git a/docs/build/reference/transformer/Replace/replace.md b/docs/build/reference/transformer/Replace/replace.md index 492f995bf..6bb1085f0 100644 --- a/docs/build/reference/transformer/Replace/replace.md +++ b/docs/build/reference/transformer/Replace/replace.md @@ -2,10 +2,12 @@ title: "Replace" description: "Replace all occurrences of a string with another string." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Replace + Replace all occurrences of a string with another string. @@ -18,7 +20,7 @@ Replace all occurrences of a string with another string. **Example 1:** * Parameters - * search: ` ` + * search: `` * replace: `` * Input values: diff --git a/docs/build/reference/transformer/Selection/coalesce.md b/docs/build/reference/transformer/Selection/coalesce.md index 87e7bf5f8..8b39ccd6f 100644 --- a/docs/build/reference/transformer/Selection/coalesce.md +++ b/docs/build/reference/transformer/Selection/coalesce.md @@ -2,10 +2,12 @@ title: "Coalesce (first non-empty input)" description: "Forwards the first non-empty input, i.e. for which any value(s) exist. A single empty string is considered a value." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Coalesce (first non-empty input) + Forwards the first non-empty input, i.e. for which any value(s) exist. A single empty string is considered a value. diff --git a/docs/build/reference/transformer/Selection/regexSelect.md b/docs/build/reference/transformer/Selection/regexSelect.md index 13386fdb7..119948a41 100644 --- a/docs/build/reference/transformer/Selection/regexSelect.md +++ b/docs/build/reference/transformer/Selection/regexSelect.md @@ -2,10 +2,12 @@ title: "Regex selection" description: "This transformer takes 3 inputs: one output value, multiple regex patterns, and a value to check against those patterns. It returns the output value at positions where regex patterns match the input value." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Regex selection + ## Description of the plugin diff --git a/docs/build/reference/transformer/Sequence/count.md b/docs/build/reference/transformer/Sequence/count.md index 985d51a4b..a26555856 100644 --- a/docs/build/reference/transformer/Sequence/count.md +++ b/docs/build/reference/transformer/Sequence/count.md @@ -2,10 +2,12 @@ title: "Count values" description: "Counts the number of values." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Count values + Counts the number of values. diff --git a/docs/build/reference/transformer/Sequence/getValueByIndex.md b/docs/build/reference/transformer/Sequence/getValueByIndex.md index ff3fa67ec..deda40c21 100644 --- a/docs/build/reference/transformer/Sequence/getValueByIndex.md +++ b/docs/build/reference/transformer/Sequence/getValueByIndex.md @@ -2,10 +2,12 @@ title: "Get value by index" description: "Returns the value found at the specified index. Fails or returns an empty result depending on failIfNoFound is set or not. Please be aware that this will work only if the data source supports some kind of ordering like XML or JSON. This is probably not a good idea to do with RDF models. If emptyStringToEmptyResult is true then instead of a result with an empty String, an empty result is returned." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Get value by index + Returns the value found at the specified index. Fails or returns an empty result depending on failIfNoFound is set or not. @@ -13,7 +15,6 @@ Returns the value found at the specified index. Fails or returns an empty result is probably not a good idea to do with RDF models. If emptyStringToEmptyResult is true then instead of a result with an empty String, an empty result is returned. 
- ## Parameter diff --git a/docs/build/reference/transformer/Sequence/sort.md b/docs/build/reference/transformer/Sequence/sort.md index 2eb402732..c14ba7aa7 100644 --- a/docs/build/reference/transformer/Sequence/sort.md +++ b/docs/build/reference/transformer/Sequence/sort.md @@ -2,10 +2,12 @@ title: "Sort" description: "Sorts values lexicographically." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Sort + Sorts values lexicographically. diff --git a/docs/build/reference/transformer/Sequence/toSequenceIndex.md b/docs/build/reference/transformer/Sequence/toSequenceIndex.md index a5567d215..a1a041087 100644 --- a/docs/build/reference/transformer/Sequence/toSequenceIndex.md +++ b/docs/build/reference/transformer/Sequence/toSequenceIndex.md @@ -2,10 +2,12 @@ title: "Sequence values to indexes" description: "Transforms the sequence of values to their respective indexes in the sequence. If there is more than one input, the values are numbered from the first input on and continued for the next inputs. Applied against an RDF source the order might not be deterministic." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Sequence values to indexes + Transforms the sequence of values to their respective indexes in the sequence. If there is more than one input, the values are numbered from the first input on and continued for the next inputs. Applied against an RDF source the order might not be deterministic. diff --git a/docs/build/reference/transformer/Substring/stripPostfix.md b/docs/build/reference/transformer/Substring/stripPostfix.md index da38d0400..772d3f32d 100644 --- a/docs/build/reference/transformer/Substring/stripPostfix.md +++ b/docs/build/reference/transformer/Substring/stripPostfix.md @@ -2,10 +2,12 @@ title: "Strip postfix" description: "Strips a postfix of a string." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Strip postfix + Strips a postfix of a string. 
diff --git a/docs/build/reference/transformer/Substring/stripPrefix.md b/docs/build/reference/transformer/Substring/stripPrefix.md index 96a808c77..133cdd759 100644 --- a/docs/build/reference/transformer/Substring/stripPrefix.md +++ b/docs/build/reference/transformer/Substring/stripPrefix.md @@ -2,10 +2,12 @@ title: "Strip prefix" description: "Strips a prefix of a string." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Strip prefix + Strips a prefix of a string. diff --git a/docs/build/reference/transformer/Substring/stripUriPrefix.md b/docs/build/reference/transformer/Substring/stripUriPrefix.md index b24361010..2bb959e6f 100644 --- a/docs/build/reference/transformer/Substring/stripUriPrefix.md +++ b/docs/build/reference/transformer/Substring/stripUriPrefix.md @@ -2,10 +2,12 @@ title: "Strip URI prefix" description: "Strips the URI prefix and decodes the remainder based on UTF-8 URL decoding (using java.net.URLDecoder). Leaves values unchanged which are not a valid URI." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Strip URI prefix + Strips the URI prefix and decodes the remainder based on UTF-8 URL decoding (using java.net.URLDecoder). Leaves values unchanged which are not a valid URI. diff --git a/docs/build/reference/transformer/Substring/substring.md b/docs/build/reference/transformer/Substring/substring.md index d2f6e630e..2e30fa1f5 100644 --- a/docs/build/reference/transformer/Substring/substring.md +++ b/docs/build/reference/transformer/Substring/substring.md @@ -2,10 +2,12 @@ title: "Substring" description: "Returns a substring between 'beginIndex' (inclusive) and 'endIndex' (exclusive). If 'endIndex' is 0 (default), it is ignored and the entire remaining string starting with 'beginIndex' is returned. If 'endIndex' is negative, -endIndex characters are removed from the end." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Substring + Returns a substring between 'beginIndex' (inclusive) and 'endIndex' (exclusive). If 'endIndex' is 0 (default), it is ignored and the entire remaining string starting with 'beginIndex' is returned. If 'endIndex' is negative, -endIndex characters are removed from the end. diff --git a/docs/build/reference/transformer/Substring/untilCharacter.md b/docs/build/reference/transformer/Substring/untilCharacter.md index 2ed6f9930..98fdd7453 100644 --- a/docs/build/reference/transformer/Substring/untilCharacter.md +++ b/docs/build/reference/transformer/Substring/untilCharacter.md @@ -2,10 +2,12 @@ title: "Until character" description: "Extracts the substring until the character given." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Until character + Extracts the substring until the character given. diff --git a/docs/build/reference/transformer/Template/TemplateTransformer.md b/docs/build/reference/transformer/Template/TemplateTransformer.md index 1354b7446..7d40af32c 100644 --- a/docs/build/reference/transformer/Template/TemplateTransformer.md +++ b/docs/build/reference/transformer/Template/TemplateTransformer.md @@ -2,10 +2,12 @@ title: "Evaluate template" description: "Evaluates a template. Input values can be addressed using the variables 'input1', 'input2', etc. Global variables are available in the 'global' scope, e.g., 'global.myVar'." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Evaluate template + Evaluates a template. Input values can be addressed using the variables 'input1', 'input2', etc. Global variables are available in the 'global' scope, e.g., 'global.myVar'. @@ -20,9 +22,9 @@ Evaluates a template. Input values can be addressed using the variables 'input1' * Parameters * template: - ``` + ```text Hello {{input1}} {{input2}}, - + How are you today? ``` @@ -32,9 +34,9 @@ Evaluates a template. 
Input values can be addressed using the variables 'input1' * Returns: - ``` + ```text [Hello John Doe, - + How are you today?] ``` diff --git a/docs/build/reference/transformer/Tokenization/camelcasetokenizer.md b/docs/build/reference/transformer/Tokenization/camelcasetokenizer.md index 2a6abb972..23612cada 100644 --- a/docs/build/reference/transformer/Tokenization/camelcasetokenizer.md +++ b/docs/build/reference/transformer/Tokenization/camelcasetokenizer.md @@ -2,10 +2,12 @@ title: "Camel case tokenizer" description: "Tokenizes a camel case string. That is it splits strings between a lower case character and an upper case character." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Camel case tokenizer + Tokenizes a camel case string. That is it splits strings between a lower case character and an upper case character. diff --git a/docs/build/reference/transformer/Tokenization/tokenize.md b/docs/build/reference/transformer/Tokenization/tokenize.md index e3deed86e..2c6bec129 100644 --- a/docs/build/reference/transformer/Tokenization/tokenize.md +++ b/docs/build/reference/transformer/Tokenization/tokenize.md @@ -2,10 +2,12 @@ title: "Tokenize" description: "Tokenizes all input values." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Tokenize + Tokenizes all input values. diff --git a/docs/build/reference/transformer/Uncategorized/cmem-plugin-jq-transform.md b/docs/build/reference/transformer/Uncategorized/cmem-plugin-jq-transform.md index 732cd688a..43c424120 100644 --- a/docs/build/reference/transformer/Uncategorized/cmem-plugin-jq-transform.md +++ b/docs/build/reference/transformer/Uncategorized/cmem-plugin-jq-transform.md @@ -2,11 +2,13 @@ title: "jq" description: "Process a JSON path with a jq filter / program." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator - PythonPlugin --- + # jq + !!! 
note inline end "Python Plugin" diff --git a/docs/build/reference/transformer/Uncategorized/cmem_plugin_currencies-transform.md b/docs/build/reference/transformer/Uncategorized/cmem_plugin_currencies-transform.md index bb7dadb96..4536aff22 100644 --- a/docs/build/reference/transformer/Uncategorized/cmem_plugin_currencies-transform.md +++ b/docs/build/reference/transformer/Uncategorized/cmem_plugin_currencies-transform.md @@ -2,11 +2,13 @@ title: "Convert currency values" description: "Converts currencies values with current and historical exchange rates" icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator - PythonPlugin --- + # Convert currency values + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/transformer/Validation/validateDateAfter.md b/docs/build/reference/transformer/Validation/validateDateAfter.md index 4d49ee3ef..a448d0cd3 100644 --- a/docs/build/reference/transformer/Validation/validateDateAfter.md +++ b/docs/build/reference/transformer/Validation/validateDateAfter.md @@ -2,10 +2,12 @@ title: "Validate date after" description: "Validates if the first input date is after the second input date. Outputs the first input if the validation is successful." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Validate date after + Validates if the first input date is after the second input date. Outputs the first input if the validation is successful. diff --git a/docs/build/reference/transformer/Validation/validateDateRange.md b/docs/build/reference/transformer/Validation/validateDateRange.md index c34db406e..f6199df73 100644 --- a/docs/build/reference/transformer/Validation/validateDateRange.md +++ b/docs/build/reference/transformer/Validation/validateDateRange.md @@ -2,10 +2,12 @@ title: "Validate date range" description: "Validates if dates are within a specified range." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Validate date range + Validates if dates are within a specified range. diff --git a/docs/build/reference/transformer/Validation/validateNumberOfValues.md b/docs/build/reference/transformer/Validation/validateNumberOfValues.md index bf7c404af..2a176f2b1 100644 --- a/docs/build/reference/transformer/Validation/validateNumberOfValues.md +++ b/docs/build/reference/transformer/Validation/validateNumberOfValues.md @@ -2,10 +2,12 @@ title: "Validate number of values" description: "Validates that the number of values lies in a specified range." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Validate number of values + Validates that the number of values lies in a specified range. diff --git a/docs/build/reference/transformer/Validation/validateNumericRange.md b/docs/build/reference/transformer/Validation/validateNumericRange.md index 177009089..34b43860e 100644 --- a/docs/build/reference/transformer/Validation/validateNumericRange.md +++ b/docs/build/reference/transformer/Validation/validateNumericRange.md @@ -2,10 +2,12 @@ title: "Validate numeric range" description: "Validates if a number is within a specified range." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Validate numeric range + Validates if a number is within a specified range. diff --git a/docs/build/reference/transformer/Validation/validateRegex.md b/docs/build/reference/transformer/Validation/validateRegex.md index 3b7495118..fe5367562 100644 --- a/docs/build/reference/transformer/Validation/validateRegex.md +++ b/docs/build/reference/transformer/Validation/validateRegex.md @@ -2,10 +2,12 @@ title: "Validate regex" description: "Validates if all values match a regular expression." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Validate regex + ## Description diff --git a/docs/build/reference/transformer/Value/cmem-plugin-ulid.md b/docs/build/reference/transformer/Value/cmem-plugin-ulid.md index 15c4be6fe..11d2a4058 100644 --- a/docs/build/reference/transformer/Value/cmem-plugin-ulid.md +++ b/docs/build/reference/transformer/Value/cmem-plugin-ulid.md @@ -2,11 +2,13 @@ title: "ULID" description: "Generate ULID strings - Universally Unique Lexicographically Sortable Identifiers." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator - PythonPlugin --- + # ULID + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID1.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID1.md index a41671162..185466f31 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID1.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID1.md @@ -2,11 +2,13 @@ title: "UUID1" description: "Generate a UUIDv1 from a host ID, sequence number, and the current time" icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator - PythonPlugin --- + # UUID1 + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID1ToUUID6.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID1ToUUID6.md index 4e2055f1e..18acbd01f 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID1ToUUID6.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID1ToUUID6.md @@ -2,11 +2,13 @@ title: "UUID1 to UUID6" description: "Generate UUIDv6 from a UUIDv1." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator - PythonPlugin --- + # UUID1 to UUID6 + !!! 
note inline end "Python Plugin" diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID3.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID3.md index 483731046..5a0bbcb1d 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID3.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID3.md @@ -2,11 +2,13 @@ title: "UUID3" description: "Generate a UUIDv3" icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator - PythonPlugin --- + # UUID3 + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID4.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID4.md index 3694e7768..bd3319d68 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID4.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID4.md @@ -2,11 +2,13 @@ title: "UUID4" description: "Generate a random UUIDv4." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator - PythonPlugin --- + # UUID4 + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID5.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID5.md index 3a93b8e0c..01c0fa631 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID5.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID5.md @@ -2,11 +2,13 @@ title: "UUID5" description: "Generate a UUIDv5" icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator - PythonPlugin --- + # UUID5 + !!! 
note inline end "Python Plugin" diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID6.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID6.md index 4297d57d2..19580e3c5 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID6.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID6.md @@ -2,11 +2,13 @@ title: "UUID6" description: "Generate a UUIDv6 from a host ID, sequence number, and the current time" icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator - PythonPlugin --- + # UUID6 + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID7.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID7.md index ff98bd6d5..d493ea88a 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID7.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID7.md @@ -2,11 +2,13 @@ title: "UUID7" description: "Generate a UUIDv7 from a random number, and the current time." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator - PythonPlugin --- + # UUID7 + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID8.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID8.md index 7d139eb86..9c8f62db2 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID8.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUID8.md @@ -2,11 +2,13 @@ title: "UUID8" description: "Generate a UUIDv8 from a random number, and the current time." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator - PythonPlugin --- + # UUID8 + !!! 
note inline end "Python Plugin" diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUIDConvert.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUIDConvert.md index b52b57e80..160af0833 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUIDConvert.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUIDConvert.md @@ -2,11 +2,13 @@ title: "UUID Convert" description: "Convert a UUID string representation" icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator - PythonPlugin --- + # UUID Convert + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUIDVersion.md b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUIDVersion.md index 39d23e6ab..acd950878 100644 --- a/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUIDVersion.md +++ b/docs/build/reference/transformer/Value/cmem_plugin_uuid-plugin_uuid-UUIDVersion.md @@ -2,11 +2,13 @@ title: "UUID Version" description: "Outputs UUID version number of input" icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator - PythonPlugin --- + # UUID Version + !!! note inline end "Python Plugin" diff --git a/docs/build/reference/transformer/Value/constant.md b/docs/build/reference/transformer/Value/constant.md index 1ca43caa4..905fcd354 100644 --- a/docs/build/reference/transformer/Value/constant.md +++ b/docs/build/reference/transformer/Value/constant.md @@ -2,10 +2,12 @@ title: "Constant" description: "Generates a constant value." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Constant + Generates a constant value. 
diff --git a/docs/build/reference/transformer/Value/constantUri.md b/docs/build/reference/transformer/Value/constantUri.md index 03ecab8e3..be8981f14 100644 --- a/docs/build/reference/transformer/Value/constantUri.md +++ b/docs/build/reference/transformer/Value/constantUri.md @@ -2,10 +2,12 @@ title: "Constant URI" description: "Generates a constant URI." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Constant URI + Generates a constant URI. diff --git a/docs/build/reference/transformer/Value/datasetParameter.md b/docs/build/reference/transformer/Value/datasetParameter.md index 893e40baa..0cfeb4796 100644 --- a/docs/build/reference/transformer/Value/datasetParameter.md +++ b/docs/build/reference/transformer/Value/datasetParameter.md @@ -2,10 +2,12 @@ title: "Dataset parameter" description: "Reads a meta data parameter from a dataset in Corporate Memory. If authentication is enabled, workbench.superuser must be configured." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Dataset parameter + Reads a meta data parameter from a dataset in Corporate Memory. If authentication is enabled, workbench.superuser must be configured. diff --git a/docs/build/reference/transformer/Value/defaultValue.md b/docs/build/reference/transformer/Value/defaultValue.md index 6fa7df705..33b105f2f 100644 --- a/docs/build/reference/transformer/Value/defaultValue.md +++ b/docs/build/reference/transformer/Value/defaultValue.md @@ -2,10 +2,12 @@ title: "Default Value" description: "Generates a default value, if the input values are empty. Forwards any non-empty values." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Default Value + Generates a default value, if the input values are empty. Forwards any non-empty values. 
diff --git a/docs/build/reference/transformer/Value/emptyValue.md b/docs/build/reference/transformer/Value/emptyValue.md index 649854e3d..399c335ec 100644 --- a/docs/build/reference/transformer/Value/emptyValue.md +++ b/docs/build/reference/transformer/Value/emptyValue.md @@ -2,10 +2,12 @@ title: "Empty value" description: "Generates an empty value." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Empty value + Generates an empty value. diff --git a/docs/build/reference/transformer/Value/inputHash.md b/docs/build/reference/transformer/Value/inputHash.md index b78c7df08..ad5146cbb 100644 --- a/docs/build/reference/transformer/Value/inputHash.md +++ b/docs/build/reference/transformer/Value/inputHash.md @@ -2,10 +2,12 @@ title: "Input hash" description: "Calculates the hash sum of the input values. Generates a single hash sum for all input values combined." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Input hash + Calculates the hash sum of the input values. Generates a single hash sum for all input values combined. diff --git a/docs/build/reference/transformer/Value/randomNumber.md b/docs/build/reference/transformer/Value/randomNumber.md index 1ad34e622..ad3212cd1 100644 --- a/docs/build/reference/transformer/Value/randomNumber.md +++ b/docs/build/reference/transformer/Value/randomNumber.md @@ -2,10 +2,12 @@ title: "Random number" description: "Generates a set of random numbers." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Random number + Generates a set of random numbers. diff --git a/docs/build/reference/transformer/Value/readParameter.md b/docs/build/reference/transformer/Value/readParameter.md index 088e0feed..089deec06 100644 --- a/docs/build/reference/transformer/Value/readParameter.md +++ b/docs/build/reference/transformer/Value/readParameter.md @@ -2,10 +2,12 @@ title: "Read parameter" description: "Reads a parameter from a Java Properties file." 
icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # Read parameter + Reads a parameter from a Java Properties file. diff --git a/docs/build/reference/transformer/Value/uuid.md b/docs/build/reference/transformer/Value/uuid.md index 950657928..2a1d60a53 100644 --- a/docs/build/reference/transformer/Value/uuid.md +++ b/docs/build/reference/transformer/Value/uuid.md @@ -2,10 +2,12 @@ title: "UUID" description: "Generates UUIDs. If no input value is provided, a random UUID (type 4) is generated using a cryptographically strong pseudo random number generator. If input values are provided, a name-based UUID (type 3) is generated for each input value. Each input value will generate a separate UUID. For building a UUID from multiple inputs, the Concatenate operator can be used." icon: octicons/cross-reference-24 -tags: +tags: - TransformOperator --- + # UUID + Generates UUIDs. diff --git a/docs/build/reference/transformer/index.md b/docs/build/reference/transformer/index.md index 1a5267501..56be6304e 100644 --- a/docs/build/reference/transformer/index.md +++ b/docs/build/reference/transformer/index.md @@ -5,7 +5,9 @@ tags: - Build - Reference --- + # Transformers + Transform operators transform a one or more sequences of string values to a sequence of string values. 
diff --git a/docs/deploy-and-configure/configuration/dataintegration/activity-reference/index.md b/docs/deploy-and-configure/configuration/dataintegration/activity-reference/index.md index c78da92d1..6a682a4e7 100644 --- a/docs/deploy-and-configure/configuration/dataintegration/activity-reference/index.md +++ b/docs/deploy-and-configure/configuration/dataintegration/activity-reference/index.md @@ -18,7 +18,7 @@ Generates matches between schema paths and datasets based on the schema discover | Parameter | Type | Description | Example | | ---------------------- | ------------- | ------------------ | -------------------------- | -| datasetUri | String | If set, run dataset matching only for this particular dataset. | +| datasetUri | String | If set, run dataset matching only for this particular dataset. | | The identifier for this plugin is `DatasetMatcher`. @@ -47,13 +47,13 @@ Generates profiling data of a dataset, e.g. data types, statistics etc. | Parameter | Type | Description | Example | | ---------------------- | ------------- | ------------------ | -------------------------- | -| datasetUri | String | Optional URI of the dataset resource that should be profiled. If not specified an URI will be generated. | -| uriPrefix | String | Optional URI prefix that is prepended to every generated URI, e.g. property URIs for every schema path. If not specified an URI prefix will be generated. | -| entitySampleLimit | String | How many entities should be sampled for the profiling. If set to zero or a negative value, all entities will be considered. If left blank the configured default value is used. | -| timeLimit | String | The time in milliseconds that each of the schema extraction step and profiling step should spend on. Leave blank for unlimited time. | -| classProfilingLimit | int | The maximum number of classes that are profiled from the extracted schema. 
| -| schemaEntityLimit | int | The maximum number of overall schema entities (types, properties/attributes) that will be extracted. | -| executionType | String | The execution type to be used. At the moment, only 'LEGACY' is supported. | +| datasetUri | String | Optional URI of the dataset resource that should be profiled. If not specified an URI will be generated. | | +| uriPrefix | String | Optional URI prefix that is prepended to every generated URI, e.g. property URIs for every schema path. If not specified an URI prefix will be generated. | | +| entitySampleLimit | String | How many entities should be sampled for the profiling. If set to zero or a negative value, all entities will be considered. If left blank the configured default value is used. | | +| timeLimit | String | The time in milliseconds that each of the schema extraction step and profiling step should spend on. Leave blank for unlimited time. | | +| classProfilingLimit | int | The maximum number of classes that are profiled from the extracted schema. | | +| schemaEntityLimit | int | The maximum number of overall schema entities (types, properties/attributes) that will be extracted. | | +| executionType | String | The execution type to be used. At the moment, only 'LEGACY' is supported. | | The identifier for this plugin is `DatasetProfiler`. @@ -85,7 +85,7 @@ Executes an active learning iteration. | Parameter | Type | Description | Example | | ---------------------- | ------------- | ------------------ | -------------------------- | -| fixedRandomSeed | boolean | No description | +| fixedRandomSeed | boolean | No description | | The identifier for this plugin is `ActiveLearning`. @@ -97,7 +97,7 @@ Suggest comparison pairs for the current linking task. 
| Parameter | Type | Description | Example | | ---------------------- | ------------- | ------------------ | -------------------------- | -| fixedRandomSeed | boolean | No description | +| fixedRandomSeed | boolean | No description | | The identifier for this plugin is `ActiveLearning-ComparisonPairs`. @@ -109,13 +109,13 @@ Evaluates the linking task by generating links. | Parameter | Type | Description | Example | | ---------------------- | ------------- | ------------------ | -------------------------- | -| includeReferenceLinks | boolean | Do not generate a link for which there is a negative reference link while always generating positive reference links. | -| useFileCache | boolean | Use a file cache. This avoids memory overflows for big files. | -| partitionSize | int | The number of entities in a single partition in the cache. | -| generateLinksWithEntities | boolean | Generate detailed information about the matched entities. If set to false, the generated links won't be shown in the Workbench. | -| writeOutputs | boolean | Write the generated links to the configured output of this task. | -| linkLimit | int | If defined, the execution will stop after the configured number of links is reached.\This is just a hint and the execution may produce slightly fewer or more links. | -| timeout | int | Timeout in seconds after that the matching task of an evaluation should be aborted. Set to 0 or negative to disable the timeout. | +| includeReferenceLinks | boolean | Do not generate a link for which there is a negative reference link while always generating positive reference links. | | +| useFileCache | boolean | Use a file cache. This avoids memory overflows for big files. | | +| partitionSize | int | The number of entities in a single partition in the cache. | | +| generateLinksWithEntities | boolean | Generate detailed information about the matched entities. If set to false, the generated links won't be shown in the Workbench. 
| | +| writeOutputs | boolean | Write the generated links to the configured output of this task. | | +| linkLimit | int | If defined, the execution will stop after the configured number of links is reached.\This is just a hint and the execution may produce slightly fewer or more links. | | +| timeout | int | Timeout in seconds after that the matching task of an evaluation should be aborted. Set to 0 or negative to disable the timeout. | | The identifier for this plugin is `EvaluateLinking`. @@ -178,7 +178,7 @@ Executes the transformation. | Parameter | Type | Description | Example | | ---------------------- | ------------- | ------------------ | -------------------------- | -| limit | IntOptionParameter | Limits the maximum number of entities that are transformed. | +| limit | IntOptionParameter | Limits the maximum number of entities that are transformed. | | The identifier for this plugin is `ExecuteTransform`. @@ -221,9 +221,9 @@ Generate project and Spark assembly artifacts and deploy them using the specifie | Parameter | Type | Description | Example | | ---------------------- | ------------- | ------------------ | -------------------------- | -| executeStaging | boolean | Execute loading phase | -| executeTransform | boolean | Execute transform phase | -| executeLoading | boolean | Execute staging phase | +| executeStaging | boolean | Execute loading phase | | +| executeTransform | boolean | Execute transform phase | | +| executeLoading | boolean | Execute staging phase | | The identifier for this plugin is `DeploySparkWorkflow`. @@ -244,7 +244,7 @@ Executes a workflow on with an executor that uses Apache Spark. Depending on the | Parameter | Type | Description | Example | | ---------------------- | ------------- | ------------------ | -------------------------- | -| operator | TaskReference | The workflow to execute. | +| operator | TaskReference | The workflow to execute. | | The identifier for this plugin is `ExecuteSparkOperator`. 
@@ -265,9 +265,9 @@ Executes a workflow with custom payload. | Parameter | Type | Description | Example | | ---------------------- | ------------- | ------------------ | -------------------------- | -| configuration | MultilineStringParameter | No description | -| configurationType | String | No description | -| optionalPrimaryResourceManager | PluginObjectParameter | | +| configuration | MultilineStringParameter | No description | | +| configurationType | String | No description | | +| optionalPrimaryResourceManager | PluginObjectParameter | | | The identifier for this plugin is `ExecuteWorkflowWithPayload`. @@ -279,8 +279,8 @@ Generate and share a view on a workflow executed by the Spark executor. Executes | Parameter | Type | Description | Example | | ---------------------- | ------------- | ------------------ | -------------------------- | -| caching | boolean | Optional parameter that enables caching (default=false). | -| userDefinedName | String | Optional View name that is used when a view on a non virtual is generated (default = [TASK-ID]_generated_view). | +| caching | boolean | Optional parameter that enables caching (default=false). | | +| userDefinedName | String | Optional View name that is used when a view on a non virtual is generated (default = [TASK-ID]_generated_view). | | The identifier for this plugin is `GenerateSparkView`. 
diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-full.md index 831a53a4a..b67e85b2f 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-full.md @@ -85,7 +85,7 @@ key: | | Default | *none* | | Required | false | | Valid values | PGP Key (Message) | - | Conflicts with | license.file | +| Conflicts with | license.file | | Environment | LICENSE_KEY | ***Property: license.file*** @@ -97,7 +97,7 @@ Use this property to specify the location of the license file | Default | *none* | | Required | false | | Valid values | location of the license file | - | Conflicts with | license.key | +| Conflicts with | license.key | | Environment | LICENSE_FILE | ## General platform settings for DataPlatform @@ -446,7 +446,8 @@ This configures the possible chat models for interacting with the companion. | Category | Value | |--- | ---: | -| Default | { \"name\": \"string\", \"schema\": \"string\" } +| Default | { \"name\": \"string\", \"schema\": \"string\" } | + | | Required | false | | Valid values | string | @@ -474,7 +475,8 @@ This configures the possible chat models for interacting with the companion. | Category | Value | |--- | ---: | -| Default | { \"name\": \"string\", \"schema\": \"string\" } +| Default | { \"name\": \"string\", \"schema\": \"string\" } | + | | Required | false | | Valid values | string | @@ -868,14 +870,14 @@ Use this property to configure the URI of the public user (see section Public ac | Valid values | string | | Environment | AUTHORIZATION_ABOX_ANONYMOUSUSER | -#### Access conditions +### Access conditions **IMPORTANT:** The following properties are deprecated and have no function anymore! 
***Property: authorization.abox.accessConditions.url*** **DEPRECATED** -Use this property to set the URL of the access conditions model file. This can be either a remote (http://...) or a local (file:...) .rdf file. Refer to section Access conditions for more information on the access conditions model. +Use this property to set the URL of the access conditions model file. This can be either a remote (...) or a local (file:...) .rdf file. Refer to section Access conditions for more information on the access conditions model. | Category | Value | |--- | ---: | @@ -895,7 +897,7 @@ Use this property to set the graph containing the access conditions model. | Default | | | Required | false | | Valid values | string | - | Conflicts with | url | +| Conflicts with | url | | Environment | AUTHORIZATION_ABOX_ACCESSCONDITIONS_GRAPH | ## SPARQL endpoints @@ -938,7 +940,7 @@ Use this property to specify which RDF properties should be used to provide labe | Category | Value | |--- | ---: | -| Default | [http://www.w3.org/2004/02/skos/core#prefLabel, http://www.w3.org/2000/01/rdf-schema#label, http://purl.org/dc/terms/title, http://www.w3.org/ns/shacl#name] | +| Default | [, , , ] | | Required | false | | Valid values | list of Properties | | Environment | PROXY_LABELPROPERTIES | @@ -950,7 +952,7 @@ Use this property to specify which RDF properties should be used to provide desc | Category | Value | |--- | ---: | -| Default | [http://purl.org/dc/terms/description, http://www.w3.org/2000/01/rdf-schema#comment] | +| Default | [, ] | | Required | false | | Valid values | list of Properties | | Environment | PROXY_DESCRIPTIONPROPERTIES | diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-oauth-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-oauth-full.md index 3128cc80e..bd8eddc4a 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-oauth-full.md +++ 
b/docs/deploy-and-configure/configuration/explore/dataplatform/application-oauth-full.md @@ -85,7 +85,7 @@ Use this property to specify the claim identifying the roles (authorities) of th |--- | ---: | | Default | groups | | Required | false | -| Valid values | string | list of strings | +| Valid values | string | | Environment | SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_CLAIMS_GROUPS | ***Property: spring.security.oauth2.resourceserver.jwt.claims.clientId*** diff --git a/docs/deploy-and-configure/configuration/explore/graph-resource-pattern/index.md b/docs/deploy-and-configure/configuration/explore/graph-resource-pattern/index.md index cfb39ecea..d23d223de 100644 --- a/docs/deploy-and-configure/configuration/explore/graph-resource-pattern/index.md +++ b/docs/deploy-and-configure/configuration/explore/graph-resource-pattern/index.md @@ -1,5 +1,5 @@ --- -#icon: octicons/cross-reference-24 +# icon: octicons/cross-reference-24 tags: - Reference --- diff --git a/docs/develop/python-plugins/setup/index.md b/docs/develop/python-plugins/setup/index.md index f499d2fb9..08797f0f5 100644 --- a/docs/develop/python-plugins/setup/index.md +++ b/docs/develop/python-plugins/setup/index.md @@ -101,7 +101,7 @@ To do so, use the [`pip download`](https://pip.pypa.io/en/stable/cli/pip_downloa ??? 
note "Example shell session showing the usage of `pip download`" - ``` + ```text $ cat requirements.txt cmem-plugin-validation cmem-plugin-graphql diff --git a/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/datatype-reference/index.md b/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/datatype-reference/index.md index 825dd30c9..ab0d807d1 100644 --- a/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/datatype-reference/index.md +++ b/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/datatype-reference/index.md @@ -4,7 +4,9 @@ tags: - Reference - Vocabulary --- + # Datatypes + This is a list of supported data types in shapes. @@ -13,13 +15,13 @@ This is a list of supported data types in shapes. Not all datatypes result in specific widgets. -#### anyURI +## anyURI The ·lexical space· of anyURI is finite-length character sequences which, when the algorithm defined in Section 5.4 of [XML Linking Language] is applied to them, result in strings which are legal URIs according to [RFC 2396], as amended by [RFC 2732]. Note: Spaces are, in principle, allowed in the ·lexical space· of anyURI, however, their use is highly discouraged (unless they are encoded by %20). IRI: `http://www.w3.org/2001/XMLSchema#anyURI` -#### base64Binary +### base64Binary The lexical forms of base64Binary values are limited to the 65 characters of the Base64 Alphabet defined in [RFC 2045], i.e., a-z, A-Z, 0-9, the plus sign (+), the forward slash (/) and the equal sign (=), together with the characters defined in [XML 1.0 (Second Edition)] as white space. No other characters are allowed. 
diff --git a/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/node-shapes/index.md b/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/node-shapes/index.md index 5592eeaf3..aa14354a3 100644 --- a/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/node-shapes/index.md +++ b/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/node-shapes/index.md @@ -6,7 +6,9 @@ tags: - Reference - Vocabulary --- + # Node Shapes + Node Shapes are resources of type `shacl:NodeShape`. diff --git a/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/property-shapes/index.md b/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/property-shapes/index.md index fb06507e9..2fc4120fd 100644 --- a/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/property-shapes/index.md +++ b/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/property-shapes/index.md @@ -4,7 +4,9 @@ tags: - Reference - Vocabulary --- + # Property Shapes + Property Shapes are resources of type `shacl:PropertyShape`. diff --git a/docs/explore-and-author/graph-exploration/index.md b/docs/explore-and-author/graph-exploration/index.md index cc50cd166..6279699ed 100644 --- a/docs/explore-and-author/graph-exploration/index.md +++ b/docs/explore-and-author/graph-exploration/index.md @@ -15,13 +15,14 @@ To open the Explore module, click **:eccenca-application-explore: Knowledge G The user interface of the Explore module shows the following main areas:
-- the header area, showing: - - selected elements, - - possible actions (e.g. **:eccenca-item-add-artefact: create** or **:eccenca-item-remove: remove resource**), - - a **:eccenca-module-search: Go to resource** (2) input field, - - and a **:eccenca-application-useraccount: user menu** -- the navigation area, showing the [Graphs](#graphs) and the [Navigation](#navigation) structures, (1) -- the main area, providing multiple views, depending on which resource has been selected. + +- the header area, showing: + - selected elements, + - possible actions (e.g. **:eccenca-item-add-artefact: create** or **:eccenca-item-remove: remove resource**), + - a **:eccenca-module-search: Go to resource** (2) input field, + - and a **:eccenca-application-useraccount: user menu** +- the navigation area, showing the [Graphs](#graphs) and the [Navigation](#navigation) structures, (1) +- the main area, providing multiple views, depending on which resource has been selected.
1. If necessary, you can toggle the navigation area by using the @@ -57,11 +58,12 @@ You can search for a specific graph with **:eccenca-module-search: Search**. To add a new graph to the Graphs list:
-- Click **:eccenca-item-add-artefact: Add new graph**. A dialog appears. -- Select a graph type. (1) -- Provide a name and enter the graph URI (e.g. `https://ns.eccenca.com`). -- Click **Next** and provide metadata (different types, require different metadata to enter). -- Click **Save** to create the new graph. + +- Click **:eccenca-item-add-artefact: Add new graph**. A dialog appears. +- Select a graph type. (1) +- Provide a name and enter the graph URI (e.g. `https://ns.eccenca.com`). +- Click **Next** and provide metadata (different types, require different metadata to enter). +- Click **Save** to create the new graph.
1. More concrete, you select a shape here. @@ -82,13 +84,14 @@ Use this function to add or replace data in the a graph. To update or replace data of a graph:
-- In the **Graphs** box, select **:eccenca-item-download: Manage graph** on the graph you want to update or replace. -- A dialog box appears. -- Click **Choose file** to upload a file containing the new or updated data. (1) -- Choose one of the following options: - - **Update**: add uploaded data to Graph. - - **Replace**: clear Graph and add uploaded data. -- Click **Update** to start the upload process. + +- In the **Graphs** box, select **:eccenca-item-download: Manage graph** on the graph you want to update or replace. +- A dialog box appears. +- Click **Choose file** to upload a file containing the new or updated data. (1) +- Choose one of the following options: + - **Update**: add uploaded data to Graph. + - **Replace**: clear Graph and add uploaded data. +- Click **Update** to start the upload process.
1. You can upload one of the following file formats: Turtle, N-Triples, RDF/XML, or JSON-LD. diff --git a/docs/release-notes/corporate-memory-21-04/index.md b/docs/release-notes/corporate-memory-21-04/index.md index e6da032eb..64db2437f 100644 --- a/docs/release-notes/corporate-memory-21-04/index.md +++ b/docs/release-notes/corporate-memory-21-04/index.md @@ -103,10 +103,10 @@ This version of eccenca DataManager adds the following new features: In addition to that, these changes are shipped: - General - - Use redux store to manage notifications in DataManager (MessageHandler) and improve error parse / handle - - Use redux store to manage main application state. - - Change value of `js.config.modules.explore.overallSearchQuery` and `js.config.modules.explore.navigation.searchQuery` to use the `""""` SPARQL string separator. - - *BREAK* please use `"""` if you use custom queries for that values + - Use redux store to manage notifications in DataManager (MessageHandler) and improve error parse / handle + - Use redux store to manage main application state. + - Change value of `js.config.modules.explore.overallSearchQuery` and `js.config.modules.explore.navigation.searchQuery` to use the `""""` SPARQL string separator. 
+ - *BREAK* please use `"""` if you use custom queries for that values - Development - Switch to GUI elements repository from Github diff --git a/docs/release-notes/corporate-memory-24-2/index.md b/docs/release-notes/corporate-memory-24-2/index.md index b0d3285e9..10f61513e 100644 --- a/docs/release-notes/corporate-memory-24-2/index.md +++ b/docs/release-notes/corporate-memory-24-2/index.md @@ -188,7 +188,7 @@ v24.2.0 of DataPlatform adds the following new features: - `validationResultsTargetGraph`: Graph to write rdf validation model into after batch finishes - `replace`: boolean value on whether to replace the graph (default: false) - Added option for SHACL Batch run to query target resources with a ignore list for OWL imports, `POST /api/shacl/validation/batches` - - `owlImportsIgnoreList`: A set of graph IRIs which are not queried in the resource selection (i.e. owl imports ignored) + - `owlImportsIgnoreList`: A set of graph IRIs which are not queried in the resource selection (i.e. owl imports ignored) - Added module Access-Control to workspace configuration - Existing module Administration split into workspace configuration and access control. - Existing administration module used for workspace configuration (as to avoid migration steps). diff --git a/docs/release-notes/corporate-memory-25-3/index.md b/docs/release-notes/corporate-memory-25-3/index.md index 0b628cf67..718fdd823 100644 --- a/docs/release-notes/corporate-memory-25-3/index.md +++ b/docs/release-notes/corporate-memory-25-3/index.md @@ -21,6 +21,7 @@ The highlights of this release are: - Explore: **Companion Chat-Based Data Interaction** - Introducing an **LLM-powered conversational interface** that lets you interact directly with your data. Ask questions about your graphs, explore insights using query catalogs or autogenerated queries, and access Corporate Memory resources and modules—all within chat. + This feature takes data accessibility and interaction to an entirely new level. 
- Explore: **Graph Insights** diff --git a/docs/testing.md b/docs/testing.md index 8827a813b..debc767f1 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -1,4 +1,5 @@ + # Testing Current issues: @@ -109,7 +110,7 @@ Current issues: - *isDromedary*: `false` - Input values: - 1. `[hello world]` + 1. `[hello world]` - Returns: @@ -154,8 +155,8 @@ Current issues: - *glue*: `-` - Input values: - 1. `[First]` - 2. `[Last]` + 1. `[First]` + 2. `[Last]` - Returns: @@ -168,13 +169,15 @@ Current issues: - *glue*: `\n\t\\` - Input values: - 1. `[a + 1. `[a \b, c]` - Returns: → `[a + \b + \c]` @@ -200,4 +203,3 @@ Current issues: \b \c] ``` - From 260ae6f5043226e442800b0daeaeff0c35d80548 Mon Sep 17 00:00:00 2001 From: Rene Pietzsch Date: Wed, 17 Dec 2025 11:42:31 +0100 Subject: [PATCH 06/17] fix links --- .../define-the-interfaces/index.md | 8 ++++---- docs/build/tutorial-how-to-link-ids-to-osint/index.md | 2 +- .../lift-data-from-STIX-2.1-data-of-mitre-attack/index.md | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/build/tutorial-how-to-link-ids-to-osint/define-the-interfaces/index.md b/docs/build/tutorial-how-to-link-ids-to-osint/define-the-interfaces/index.md index 1aaf6f1fb..23fe2db7c 100644 --- a/docs/build/tutorial-how-to-link-ids-to-osint/define-the-interfaces/index.md +++ b/docs/build/tutorial-how-to-link-ids-to-osint/define-the-interfaces/index.md @@ -31,7 +31,7 @@ The first dashboard to do for our use cases is the list of IoCs with classic SPL Here, the figure 3 is nice but before this first schema during the project, there are a lot of shemas and all were minimalist and ugly often only on a whiteboard. This type schema before the technical feasibility is only to validate the objective with the analysts before starting the development. During the technical feasibility, we can decrease/increase step-by-step your objectives to show finally a first result in figure 4 in a real dashboard.
-![Figure 4. First interface with only SPARQL queries in SPLUNK static tables.](./../link-IDS-event-to-KG/demo_ld_without_html.png) +![Figure 4. First interface with only SPARQL queries in SPLUNK static tables.](../link-IDS-event-to-KG/demo_ld_without_html.png)
Figure 4. First interface with only SPARQL queries in SPLUNK static tables.
@@ -169,12 +169,12 @@ During our project, we have implemented the SPARQL command necessary to execute With the interfaces, the available data and their links in head, the analyst can now imagine the necessary RDF models of concepts (for example, figure 7 and 8) in his future knowledge graph to generate expected dashboards. These RDF models evolve at the same time as the interfaces (forever) and according to future RDF standards of Cyber world. With Corporate Memory, after each evolution of your models, you can rebuild your knowledge graph "from scratch" when you want. Several RDF models of different versions can exist in your knowledge graphs, so you can update progressively your dashboards without service interruption of old dashboards.
-![Figure 7. RDF model of Mitre concept "course of action" in our future knowledge graph.](./../lift-data-from-STIX-2.1-data-of-mitre-attack/rdf-model-course-of-action.png) +![Figure 7. RDF model of Mitre concept "course of action" in our future knowledge graph.](../lift-data-from-STIX-2.1-data-of-mitre-attack/rdf-model-course-of-action.png)
Figure 7. RDF model of Mitre concept "course of action" in our future knowledge graph.
-![Figure 8. RDF model of concept "IoC Rule" in our future knowledge graph.](./../lift-data-from-YAML-data-of-hayabusa-sigma/23-1-rdf-model-rule.png) +![Figure 8. RDF model of concept "IoC Rule" in our future knowledge graph.](../lift-data-from-YAML-data-of-hayabusa-sigma/23-1-rdf-model-rule.png)
Figure 8. RDF model of concept "IoC Rule" in our future knowledge graph.
@@ -182,6 +182,6 @@ With the interfaces, the available data and their links in head, the analyst can Tutorial: [how to link Intrusion Detection Systems (IDS) to Open-Source INTelligence (OSINT)](../index.md) -Next chapter: [Build a Knowledge Graph from MITRE ATT&CK® datasets](./../lift-data-from-STIX-2.1-data-of-mitre-attack/index.md) +Next chapter: [Build a Knowledge Graph from MITRE ATT&CK® datasets](../lift-data-from-STIX-2.1-data-of-mitre-attack/index.md) Previous chapter: [Define the need, the expected result and the use cases](../define-the-need/index.md) diff --git a/docs/build/tutorial-how-to-link-ids-to-osint/index.md b/docs/build/tutorial-how-to-link-ids-to-osint/index.md index f3c56e550..ff046ff07 100644 --- a/docs/build/tutorial-how-to-link-ids-to-osint/index.md +++ b/docs/build/tutorial-how-to-link-ids-to-osint/index.md @@ -44,4 +44,4 @@ For the part "Link IDS event to a knowledge graph in dashboards", you need to ha --- -Next chapter: [Define the need, the expected result and the use cases](./define-the-need/index.md) +Next chapter: [Define the need, the expected result and the use cases](define-the-need/index.md) diff --git a/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-STIX-2.1-data-of-mitre-attack/index.md b/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-STIX-2.1-data-of-mitre-attack/index.md index 94411032a..8d176897b 100644 --- a/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-STIX-2.1-data-of-mitre-attack/index.md +++ b/docs/build/tutorial-how-to-link-ids-to-osint/lift-data-from-STIX-2.1-data-of-mitre-attack/index.md @@ -140,7 +140,7 @@ Create one RDF dataset for each Mitre dataset: Here, you will create all classes and attributes necessary in your use case case. Not more, not less. So, we are adding each STIX object in your knowledge base with its STIX type, its label, its description and its references. Each reference can have an url, a label, a description and an external ID, like Mitre ID or CAPEC ID. 
- In UML, you can represent your targeted model like that: here a RDF model to describe an instance of type "course-of-action" in MITRE ATT&CK. (you can download the [File drawio of schemas](./RDF_model_and_pattern.drawio)) + In UML, you can represent your targeted model like that: here a RDF model to describe an instance of type "course-of-action" in MITRE ATT&CK. (you can download the [File drawio of schemas](RDF_model_and_pattern.drawio)) ![RDF model to describe an instance of type "course-of-action" in MITRE ATT&CK](rdf-model-course-of-action.png) From c3d22506ba60fe3b5f191779a645d25ea5da7d5a Mon Sep 17 00:00:00 2001 From: Rene Pietzsch Date: Wed, 17 Dec 2025 11:55:47 +0100 Subject: [PATCH 07/17] integrate rumdl linting in github action pipeline --- .github/workflows/test.yml | 6 ++++++ .pre-commit-config.yaml | 4 ++-- Taskfile.yml | 1 + 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 5f8820d50..0f5593598 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -61,6 +61,12 @@ jobs: - name: check run: task check + - name: Publish Test Results + if: always() + uses: EnricoMi/publish-unit-test-result-action@v2 + with: + files: "dist/md-lint-issues.xml" + - name: build run: task clean build diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9f700f269..1ec91637c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,7 +5,7 @@ repos: hooks: - id: markdownlint - name: check:markdownlint - entry: task check:markdownlint + name: check:rumdl + entry: task check:rumdl language: python pass_filenames: false diff --git a/Taskfile.yml b/Taskfile.yml index 4325ce3e1..1d24b2224 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -51,6 +51,7 @@ tasks: desc: Run complete test suite deps: - check:links + - check:rumdl check:links: desc: Check outgoing links From 87cc189d67f8b4c5d2c31e9ec00fcdbde250a53a Mon Sep 17 00:00:00 2001 From: Rene Pietzsch Date: 
Thu, 8 Jan 2026 09:30:08 +0100 Subject: [PATCH 08/17] fix indention and line breaks --- docs/build/integrations/index.md | 110 ++++++++++++++----------------- 1 file changed, 48 insertions(+), 62 deletions(-) diff --git a/docs/build/integrations/index.md b/docs/build/integrations/index.md index ebaa47c92..fa84f7a20 100644 --- a/docs/build/integrations/index.md +++ b/docs/build/integrations/index.md @@ -20,10 +20,7 @@ The following services and applications can be easily integrated in Corporate Me --- - Use the [Execute Instructions](../../build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md) or [Create Embeddings](../../build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md) task -to interact with any -[Anthropic / Claude provided Large Language Models](https://docs.claude.com/en/docs/about-claude/models/overview) -(LLMs). + Use the [Execute Instructions](../../build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md) or [Create Embeddings](../../build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md) task to interact with any [Anthropic / Claude provided Large Language Models](https://docs.claude.com/en/docs/about-claude/models/overview) (LLMs). - :other-apacheavro:{ .lg .middle } Avro @@ -35,15 +32,13 @@ to interact with any --- - Use the [Execute Instructions](../../build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md) or [Create Embeddings](../../build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md) task -to interact with any [Azure AI Foundry provided Large Language Models](https://ai.azure.com/catalog) (LLMs). + Use the [Execute Instructions](../../build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md) or [Create Embeddings](../../build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md) task to interact with any [Azure AI Foundry provided Large Language Models](https://ai.azure.com/catalog) (LLMs). 
- :fontawesome-solid-file-csv:{ .lg .middle } CSV --- - Comma-separated values (CSV) is a text data format which can be processed -(read and write) with the [CSV Dataset](../../build/reference/dataset/csv.md). + Comma-separated values (CSV) is a text data format which can be processed (read and write) with the [CSV Dataset](../../build/reference/dataset/csv.md). - :material-email-outline:{ .lg .middle } eMail / SMTP @@ -68,12 +63,12 @@ to interact with any [Azure AI Foundry provided Large Language Models](https://a --- Load and write Knowledge Graphs to an external GraphDB store by using the [SPARQL endpoint](../../build/reference/dataset/sparqlEndpoint.md) dataset. -Query data from GraphDB by using the SPARQL + Query data from GraphDB by using the SPARQL -[Construct](../../build/reference/customtask/sparqlCopyOperator.md), -[Select](../../build/reference/customtask/sparqlSelectOperator.md) and -[Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. -GraphDB can be used as the integrated Quad Store as well. + [Construct](../../build/reference/customtask/sparqlCopyOperator.md), + [Select](../../build/reference/customtask/sparqlSelectOperator.md) and + [Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. + GraphDB can be used as the integrated Quad Store as well. - :simple-graphql:{ .lg .middle } GraphQL @@ -109,8 +104,7 @@ GraphDB can be used as the integrated Quad Store as well. --- - You can [send](../../build/reference/customtask/cmem_plugin_kafka-SendMessages.md) and -[receive messages](../../build/reference/customtask/cmem_plugin_kafka-ReceiveMessages.md) to and from a Kafka topic. + You can [send](../../build/reference/customtask/cmem_plugin_kafka-SendMessages.md) and [receive messages](../../build/reference/customtask/cmem_plugin_kafka-ReceiveMessages.md) to and from a Kafka topic. - :simple-kubernetes:{ .lg .middle } Kubernetes @@ -122,29 +116,25 @@ GraphDB can be used as the integrated Quad Store as well. 
--- - MariaDB can be accessed with the [Remote SQL endpoint](../../build/reference/dataset/Jdbc.md) dataset and a -[JDBC driver](https://central.sonatype.com/artifact/org.mariadb.jdbc/mariadb-java-client/overview). + MariaDB can be accessed with the [Remote SQL endpoint](../../build/reference/dataset/Jdbc.md) dataset and a [JDBC driver](https://central.sonatype.com/artifact/org.mariadb.jdbc/mariadb-java-client/overview). - :simple-mattermost:{ .lg .middle } Mattermost --- - Send workflow reports or any other message to user and groups in you Mattermost with -the [Send Mattermost messages](../../build/reference/customtask/cmem_plugin_mattermost.md) task. + Send workflow reports or any other message to user and groups in you Mattermost with the [Send Mattermost messages](../../build/reference/customtask/cmem_plugin_mattermost.md) task. - :material-microsoft:{ .lg .middle } Microsoft SQL --- - The Microsoft SQL Server can be accessed with the [Remote SQL endpoint](../../build/reference/dataset/Jdbc.md) dataset and a -[JDBC driver](https://central.sonatype.com/artifact/com.microsoft.sqlserver/mssql-jdbc). + The Microsoft SQL Server can be accessed with the [Remote SQL endpoint](../../build/reference/dataset/Jdbc.md) dataset and a [JDBC driver](https://central.sonatype.com/artifact/com.microsoft.sqlserver/mssql-jdbc). - :simple-mysql:{ .lg .middle } MySQL --- - MySQL can be accessed with the [Remote SQL endpoint](../../build/reference/dataset/Jdbc.md) dataset and a -[JDBC driver](https://central.sonatype.com/artifact/org.mariadb.jdbc/mariadb-java-client/overview). + MySQL can be accessed with the [Remote SQL endpoint](../../build/reference/dataset/Jdbc.md) dataset and a [JDBC driver](https://central.sonatype.com/artifact/org.mariadb.jdbc/mariadb-java-client/overview). 
- :simple-neo4j:{ .lg .middle } Neo4J @@ -157,19 +147,18 @@ the [Send Mattermost messages](../../build/reference/customtask/cmem_plugin_matt --- Load and write Knowledge Graphs to Amazon Neptune by using the [SPARQL endpoint](../../build/reference/dataset/sparqlEndpoint.md) dataset. -Query data from Amazon Neptune by using the SPARQL + Query data from Amazon Neptune by using the SPARQL -[Construct](../../build/reference/customtask/sparqlCopyOperator.md), -[Select](../../build/reference/customtask/sparqlSelectOperator.md) and -[Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. -Amazon Neptune can be used as the integrated Quad Store as well (beta). + [Construct](../../build/reference/customtask/sparqlCopyOperator.md), + [Select](../../build/reference/customtask/sparqlSelectOperator.md) and + [Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. + Amazon Neptune can be used as the integrated Quad Store as well (beta). - :simple-nextcloud:{ .lg .middle } Nextcloud --- - Use a Nextcloud instance to [download files](../../build/reference/customtask/cmem_plugin_nextcloud-Download.md) to process -them or [upload files](../../build/reference/customtask/cmem_plugin_nextcloud-Upload.md) you created with Corporate Memory. + Use a Nextcloud instance to [download files](../../build/reference/customtask/cmem_plugin_nextcloud-Download.md) to process them or [upload files](../../build/reference/customtask/cmem_plugin_nextcloud-Upload.md) you created with Corporate Memory. - :material-microsoft-office:{ .lg .middle } Office 365 @@ -181,15 +170,13 @@ them or [upload files](../../build/reference/customtask/cmem_plugin_nextcloud-Up --- - Use the [Execute Instructions](../../build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md) or [Create Embeddings](../../build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md) task -to interact with Ollama provided Large Language Models (LLMs). 
+ Use the [Execute Instructions](../../build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md) or [Create Embeddings](../../build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md) task to interact with Ollama provided Large Language Models (LLMs). - :simple-openai:{ .lg .middle } OpenAI --- - Use the [Execute Instructions](../../build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md) or [Create Embeddings](../../build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md) task -to interact with any [OpenAI provided Large Language Models](https://platform.openai.com/docs/models) (LLMs). + Use the [Execute Instructions](../../build/reference/customtask/cmem_plugin_llm-ExecuteInstructions.md) or [Create Embeddings](../../build/reference/customtask/cmem_plugin_llm-CreateEmbeddings.md) task to interact with any [OpenAI provided Large Language Models](https://platform.openai.com/docs/models) (LLMs). - :octicons-ai-model-24:{ .lg .middle } OpenRouter @@ -215,49 +202,49 @@ to interact with any [OpenRouter provided Large Language Models](https://openrou --- Store vector embeddings into [pgvector](https://github.com/pgvector/pgvector) -using the [Search Vector Embeddings](../../build/reference/customtask/cmem_plugin_pgvector-Search.md). + using the [Search Vector Embeddings](../../build/reference/customtask/cmem_plugin_pgvector-Search.md). - :simple-postgresql:{ .lg .middle } PostgreSQL --- PostgreSQL can be accessed with the [Remote SQL endpoint](../../build/reference/dataset/Jdbc.md) dataset and a -[JDBC driver](https://central.sonatype.com/artifact/org.postgresql/postgresql/versions). + [JDBC driver](https://central.sonatype.com/artifact/org.postgresql/postgresql/versions). - :other-powerbi:{ .lg .middle } PowerBI --- Leverage your Knowledge Graphs in PowerBI by using our -[Corporate Memory Power-BI-Connector](../../consume/consuming-graphs-in-power-bi/index.md). 
+ [Corporate Memory Power-BI-Connector](../../consume/consuming-graphs-in-power-bi/index.md). - :other-qlever:{ .lg .middle } Qlever --- Load and write Knowledge Graphs to an external Qlever store by using the [SPARQL endpoint](../../build/reference/dataset/sparqlEndpoint.md) dataset. -Query data from Qlever by using the SPARQL + Query data from Qlever by using the SPARQL -[Construct](../../build/reference/customtask/sparqlCopyOperator.md), -[Select](../../build/reference/customtask/sparqlSelectOperator.md) and -[Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. -Qlever can be used as the integrated Quad Store as well (beta). + [Construct](../../build/reference/customtask/sparqlCopyOperator.md), + [Select](../../build/reference/customtask/sparqlSelectOperator.md) and + [Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. + Qlever can be used as the integrated Quad Store as well (beta). - :simple-semanticweb:{ .lg .middle } RDF --- Use the [RDF file](../../build/reference/dataset/file.md) dataset to read and write files in the RDF formats -([N-Quads](https://www.w3.org/TR/n-quads/), [N-Triples](https://www.w3.org/TR/n-triples/), -[Turtle](https://www.w3.org/TR/turtle/), [RDF/XML](https://www.w3.org/TR/rdf-syntax-grammar/) or -[RDF/JSON](https://www.w3.org/TR/rdf-json/)). + ([N-Quads](https://www.w3.org/TR/n-quads/), [N-Triples](https://www.w3.org/TR/n-triples/), + [Turtle](https://www.w3.org/TR/turtle/), [RDF/XML](https://www.w3.org/TR/rdf-syntax-grammar/) or + [RDF/JSON](https://www.w3.org/TR/rdf-json/)). - :other-redash:{ .lg .middle } Redash --- Leverage your Knowledge Graphs in Redash using the integrated -[Corporate Memory Redash-Connector](../../consume/consuming-graphs-with-redash/index.md). + [Corporate Memory Redash-Connector](../../consume/consuming-graphs-with-redash/index.md). 
- :material-application-braces-outline:{ .lg .middle } REST @@ -269,15 +256,14 @@ Qlever can be used as the integrated Quad Store as well (beta). --- - Interact with your Salesforce data, such as [Create/Update Salesforce Objects](../../build/reference/customtask/cmem_plugin_salesforce-workflow-operations-SobjectCreate.md) or -execute a [SOQL query (Salesforce)](../../build/reference/customtask/cmem_plugin_salesforce-SoqlQuery.md). + Interact with your Salesforce data, such as [Create/Update Salesforce Objects](../../build/reference/customtask/cmem_plugin_salesforce-workflow-operations-SobjectCreate.md) or execute a [SOQL query (Salesforce)](../../build/reference/customtask/cmem_plugin_salesforce-SoqlQuery.md). - :simple-snowflake:{ .lg .middle } Snowflake --- Snowflake can be accessed with the [Snowflake SQL endpoint](../../build/reference/dataset/SnowflakeJdbc.md) dataset and a -[JDBC driver](https://central.sonatype.com/artifact/net.snowflake/snowflake-jdbc). + [JDBC driver](https://central.sonatype.com/artifact/net.snowflake/snowflake-jdbc). - :simple-apachespark:{ .lg .middle } Spark @@ -290,7 +276,7 @@ execute a [SOQL query (Salesforce)](../../build/reference/customtask/cmem_plugin --- SQLite can be accessed with the [Remote SQL endpoint](../../build/reference/dataset/Jdbc.md) dataset and a -[JDBC driver](https://central.sonatype.com/artifact/org.xerial/sqlite-jdbc). + [JDBC driver](https://central.sonatype.com/artifact/org.xerial/sqlite-jdbc). - :material-ssh:{ .lg .middle } SSH @@ -303,38 +289,38 @@ execute a [SOQL query (Salesforce)](../../build/reference/customtask/cmem_plugin --- Load and write Knowledge Graphs to an external Tentris store by using the [SPARQL endpoint](../../build/reference/dataset/sparqlEndpoint.md) dataset. 
-Query data from Tentris by using the SPARQL + Query data from Tentris by using the SPARQL -[Construct](../../build/reference/customtask/sparqlCopyOperator.md), -[Select](../../build/reference/customtask/sparqlSelectOperator.md) and -[Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. -Tentris can be used as the integrated Quad Store as well (beta). + [Construct](../../build/reference/customtask/sparqlCopyOperator.md), + [Select](../../build/reference/customtask/sparqlSelectOperator.md) and + [Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. + Tentris can be used as the integrated Quad Store as well (beta). - :simple-trino:{ .lg .middle } Trino --- [Trino](https://github.com/trinodb/trino) can be access with the -[Remote SQL endpoint](../../build/reference/dataset/Jdbc.md) dataset and a [JDBC driver](https://trino.io/docs/current/client/jdbc.html). + [Remote SQL endpoint](../../build/reference/dataset/Jdbc.md) dataset and a [JDBC driver](https://trino.io/docs/current/client/jdbc.html). - :black_large_square:{ .lg .middle } Virtuoso --- Load and write Knowledge Graphs to an external Openlink Virtuoso store by using the [SPARQL endpoint](../../build/reference/dataset/sparqlEndpoint.md) dataset. -Query data from Virtuoso by using the SPARQL + Query data from Virtuoso by using the SPARQL -[Construct](../../build/reference/customtask/sparqlCopyOperator.md), -[Select](../../build/reference/customtask/sparqlSelectOperator.md) and -[Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. -Virtuoso can be used as the integrated Quad Store as well (beta). + [Construct](../../build/reference/customtask/sparqlCopyOperator.md), + [Select](../../build/reference/customtask/sparqlSelectOperator.md) and + [Update](../../build/reference/customtask/sparqlUpdateOperator.md) tasks. + Virtuoso can be used as the integrated Quad Store as well (beta). 
- :material-xml:{ .lg .middle } XML --- Load and write data to XML files with the [XML](../../build/reference/dataset/xml.md) dataset as well as -[Parse XML](../../build/reference/customtask/XmlParserOperator.md) from external services. + [Parse XML](../../build/reference/customtask/XmlParserOperator.md) from external services. - :simple-yaml:{ .lg .middle } YAML From c4932500ac035a2608941d95d30f3a74115da0b8 Mon Sep 17 00:00:00 2001 From: Rene Pietzsch Date: Thu, 8 Jan 2026 14:23:56 +0100 Subject: [PATCH 09/17] some XP adjustments, https://jira.eccenca.com/browse/CMEM-7248 --- .../explore/dataplatform/application-full.md | 14 ++++++-------- .../dataplatform/application-graphdb-full.md | 12 ++++++++++++ .../explore/dataplatform/application-http-full.md | 1 + .../dataplatform/application-inmemory-full.md | 1 + .../dataplatform/application-neptune-full.md | 1 + .../dataplatform/application-virtuoso-full.md | 1 + .../datatype-reference/index.md | 4 ++-- 7 files changed, 24 insertions(+), 10 deletions(-) diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-full.md index b67e85b2f..07c731d28 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-full.md @@ -876,8 +876,7 @@ Use this property to configure the URI of the public user (see section Public ac ***Property: authorization.abox.accessConditions.url*** -**DEPRECATED** -Use this property to set the URL of the access conditions model file. This can be either a remote (...) or a local (file:...) .rdf file. Refer to section Access conditions for more information on the access conditions model. +**Deprecation:** Use this property to set the URL of the access conditions model file. This can be either a remote (...) or a local (file:...) .rdf file. 
Refer to section Access conditions for more information on the access conditions model. | Category | Value | |--- | ---: | @@ -888,8 +887,7 @@ Use this property to set the URL of the access conditions model file. This can b ***Property: authorization.abox.accessConditions.graph*** -**DEPRECATED** -Use this property to set the graph containing the access conditions model. +**Deprecation:** Use this property to set the graph containing the access conditions model. **Note:** If you change this property, you also need to change the corresponding shape definitions for access conditions (more precisely, the UI SPARQL queries). | Category | Value | @@ -1220,7 +1218,7 @@ The committer name which appears in the commit message on system commits ***Property: gitSync.committerEmail*** -The committer email which appears in the commit message on system commits +The committer email which appears in the commit message on system commits | Category | Value | |--- | ---: | @@ -1235,7 +1233,7 @@ Schedules Pull Frequency - Configured git repositories for sync are pulled regul | Category | Value | |--- | ---: | -| Default | 0 */30* ** * | +| Default | 0 */30 * * * * | | Required | false | | Valid values | Cron setting according to | | Environment | GITSYNC_SCHEDULEDPULLCRON | @@ -1610,7 +1608,7 @@ Bulk upload Pool Size - Limits how many (bulk/large) uploads via GSP / bulk load ***Property: scheduler.analyticalPoolSize*** -Limits how many analytical requests can be run in parallel. Analytical requests can have longer runtimes than retrieval requests. +Limits how many analytical requests can be run in parallel. Analytical requests can have longer runtimes than retrieval requests. 
| Category | Value | |--- | ---: | @@ -1694,7 +1692,7 @@ One of the supported types of backends DataPlatform can connect to |--- | ---: | | Default | *none* | | Required | true | -| Valid values | MEMORY, HTTP, GRAPHDB, VIRTUOSO, NEPTUNE | +| Valid values | MEMORY, HTTP, GRAPHDB, VIRTUOSO, NEPTUNE, TENTRIS | | Environment | STORE_TYPE | ***Property: store.owlImportsResolution*** diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-graphdb-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-graphdb-full.md index 7016879af..b69b35af5 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-graphdb-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-graphdb-full.md @@ -128,6 +128,17 @@ Set to true to use the native Graph Store API endpoint. Set to false to use the | Valid values | boolean | | Environment | STORE_GRAPHDB_USEDIRECTTRANSFER | +***Property: store.graphdb.useStatementParallelGspWrite*** + +Whether to use the parallel GSP write endpoint for statements. This is only relevant when `useDirectTransfer` is true. 
+ +| Category | Value | +|--- | ---: | +| Default | false | +| Required | false | +| Valid values | boolean | +| Environment | STORE_GRAPHDB_USESTATEMENTPARALLELGSPWRITE | + ***Property: store.graphdb.create-repository-on-startup*** Whether to create the given repository on startup if it does not exist @@ -171,3 +182,4 @@ Maximum amount of quads of change tracking result which will be loaded in memory | Required | false | | Valid values | int | | Environment | STORE_GRAPHDB_GRAPHDBCHANGETRACKINGMAXQUADMEMORY | + diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-http-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-http-full.md index 419f03ca1..3abc60808 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-http-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-http-full.md @@ -120,3 +120,4 @@ Defines how the raw list of graphs is retrieved, and therefore which graphs are | Required | false | | Valid values | Valid SPARQL query with bound variable "g" | | Environment | STORE_HTTP_GRAPHLISTQUERY | + diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-inmemory-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-inmemory-full.md index 29ff13f1e..c75c310f2 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-inmemory-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-inmemory-full.md @@ -53,3 +53,4 @@ list of files in file URI scheme | Required | false | | Valid values | A list of files | | Environment | STORE_MEMORY_FILES | + diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-neptune-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-neptune-full.md index d3c21fdbf..686711302 100644 --- 
a/docs/deploy-and-configure/configuration/explore/dataplatform/application-neptune-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-neptune-full.md @@ -138,3 +138,4 @@ The degree of parallelism (CPU) for the neptune loader, possible values are LOW, | Required | false | | Valid values | LOW, MEDIUM, HIGH, OVERSUBSCRIBE | | Environment | STORE_NEPTUNE_S3_BULKLOADPARALLELISM | + diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-virtuoso-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-virtuoso-full.md index 873286ce2..e11f260b2 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-virtuoso-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-virtuoso-full.md @@ -100,3 +100,4 @@ The credentials of the given user | Required | false | | Valid values | string | | Environment | STORE_VIRTUOSO_PASSWORD | + diff --git a/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/datatype-reference/index.md b/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/datatype-reference/index.md index ab0d807d1..9a6c9b574 100644 --- a/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/datatype-reference/index.md +++ b/docs/explore-and-author/graph-exploration/building-a-customized-user-interface/datatype-reference/index.md @@ -17,7 +17,7 @@ This is a list of supported data types in shapes. ## anyURI -The ·lexical space· of anyURI is finite-length character sequences which, when the algorithm defined in Section 5.4 of [XML Linking Language] is applied to them, result in strings which are legal URIs according to [RFC 2396], as amended by [RFC 2732]. Note: Spaces are, in principle, allowed in the ·lexical space· of anyURI, however, their use is highly discouraged (unless they are encoded by %20). 
+The ·lexical space· of anyURI is finite-length character sequences which, when the algorithm defined in Section 5.4 of [XML Linking Language] is applied to them, result in strings which are legal URIs according to [RFC 2396], as amended by [RFC 2732]. Note: Spaces are, in principle, allowed in the ·lexical space· of anyURI, however, their use is highly discouraged (unless they are encoded by %20). IRI: `http://www.w3.org/2001/XMLSchema#anyURI` @@ -53,7 +53,7 @@ IRI: `http://www.w3.org/2001/XMLSchema#dateTime` #### dateTimeStamp -The lexical space of dateTimeStamp consists of strings which are in the ·lexical space· of dateTime and which also match the regular expression '.*(Z|[+|-][0-9](0-9):[0-9][0-9])' +The lexical space of dateTimeStamp consists of strings which are in the ·lexical space· of dateTime and which also match the regular expression `.*(Z|(+|-)[0-9][0-9]:[0-9][0-9])` IRI: `http://www.w3.org/2001/XMLSchema#dateTimeStamp` From 4c23c91a9579c949f32c4b79bec319b6e49b2591 Mon Sep 17 00:00:00 2001 From: Rene Pietzsch Date: Thu, 8 Jan 2026 14:25:39 +0100 Subject: [PATCH 10/17] some XP adjustments, https://jira.eccenca.com/browse/CMEM-7248 --- .../explore/dataplatform/application-graphdb-full.md | 1 - .../explore/dataplatform/application-http-full.md | 1 - .../explore/dataplatform/application-inmemory-full.md | 3 +-- .../explore/dataplatform/application-neptune-full.md | 1 - .../explore/dataplatform/application-virtuoso-full.md | 1 - .../configuration/explore/dataplatform/index.md | 1 + 6 files changed, 2 insertions(+), 6 deletions(-) diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-graphdb-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-graphdb-full.md index b69b35af5..c89c31d31 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-graphdb-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-graphdb-full.md @@ -182,4 +182,3 
@@ Maximum amount of quads of change tracking result which will be loaded in memory | Required | false | | Valid values | int | | Environment | STORE_GRAPHDB_GRAPHDBCHANGETRACKINGMAXQUADMEMORY | - diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-http-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-http-full.md index 3abc60808..419f03ca1 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-http-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-http-full.md @@ -120,4 +120,3 @@ Defines how the raw list of graphs is retrieved, and therefore which graphs are | Required | false | | Valid values | Valid SPARQL query with bound variable "g" | | Environment | STORE_HTTP_GRAPHLISTQUERY | - diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-inmemory-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-inmemory-full.md index c75c310f2..28a965e0b 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-inmemory-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-inmemory-full.md @@ -18,7 +18,7 @@ store: authorization: REWRITE_FROM memory: files: - - "/data/data.trig" + - "/data/data.trig" ``` ***Property: store.type*** @@ -53,4 +53,3 @@ list of files in file URI scheme | Required | false | | Valid values | A list of files | | Environment | STORE_MEMORY_FILES | - diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-neptune-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-neptune-full.md index 686711302..d3c21fdbf 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-neptune-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-neptune-full.md @@ -138,4 +138,3 @@ The degree of 
parallelism (CPU) for the neptune loader, possible values are LOW, | Required | false | | Valid values | LOW, MEDIUM, HIGH, OVERSUBSCRIBE | | Environment | STORE_NEPTUNE_S3_BULKLOADPARALLELISM | - diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-virtuoso-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-virtuoso-full.md index e11f260b2..873286ce2 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-virtuoso-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-virtuoso-full.md @@ -100,4 +100,3 @@ The credentials of the given user | Required | false | | Valid values | string | | Environment | STORE_VIRTUOSO_PASSWORD | - diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/index.md b/docs/deploy-and-configure/configuration/explore/dataplatform/index.md index 669eb0af2..c4b97f14e 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/index.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/index.md @@ -2,6 +2,7 @@ tags: - Configuration --- + # Explore backend (DataPlatform) This manual describes how to install and set up eccenca Explore backend (DataPlatform). 
From d9fdd000fc92b491dd0bb89444fc641d2ee19d59 Mon Sep 17 00:00:00 2001 From: Rene Pietzsch Date: Thu, 8 Jan 2026 16:26:46 +0100 Subject: [PATCH 11/17] fix XP issues, https://jira.eccenca.com/browse/CMEM-7248 --- .../configuration/explore/dataplatform/application-full.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/deploy-and-configure/configuration/explore/dataplatform/application-full.md b/docs/deploy-and-configure/configuration/explore/dataplatform/application-full.md index 07c731d28..babe47c82 100644 --- a/docs/deploy-and-configure/configuration/explore/dataplatform/application-full.md +++ b/docs/deploy-and-configure/configuration/explore/dataplatform/application-full.md @@ -457,7 +457,7 @@ This configures the possible chat models for interacting with the companion. | Category | Value | |--- | ---: | -| Default | | +| Default | | | Required | false | | Valid values | string | | Environment | SPRING_AI_AZURE_OPENAI_ENDPOINT | From a84d8f8997763124334e40540daa70d2831ae354 Mon Sep 17 00:00:00 2001 From: Rene Pietzsch Date: Tue, 27 Jan 2026 17:16:26 +0100 Subject: [PATCH 12/17] rumdl auto-fixes --- .../command-reference/package/index.md | 52 ++++--------------- 1 file changed, 9 insertions(+), 43 deletions(-) diff --git a/docs/automate/cmemc-command-line-interface/command-reference/package/index.md b/docs/automate/cmemc-command-line-interface/command-reference/package/index.md index afa1c86e2..73fdc796c 100644 --- a/docs/automate/cmemc-command-line-interface/command-reference/package/index.md +++ b/docs/automate/cmemc-command-line-interface/command-reference/package/index.md @@ -6,24 +6,21 @@ tags: - cmemc - Package --- + # package Command Group + List, (un)install, export, create, or inspect packages. - ## package create Initialize an empty package directory with a minimal manifest. ```shell-session title="Usage" -$ cmemc package create [OPTIONS] PACKAGE_ID +cmemc package create [OPTIONS] PACKAGE_ID ``` - - - - ??? 
info "Options" ```text @@ -40,13 +37,9 @@ $ cmemc package create [OPTIONS] PACKAGE_ID Inspect the manifest of a package. ```shell-session title="Usage" -$ cmemc package inspect [OPTIONS] PACKAGE_PATH +cmemc package inspect [OPTIONS] PACKAGE_PATH ``` - - - - ??? info "Options" ```text @@ -59,13 +52,9 @@ $ cmemc package inspect [OPTIONS] PACKAGE_PATH List installed packages. ```shell-session title="Usage" -$ cmemc package list [OPTIONS] +cmemc package list [OPTIONS] ``` - - - - ??? info "Options" ```text @@ -82,16 +71,11 @@ $ cmemc package list [OPTIONS] Install packages. ```shell-session title="Usage" -$ cmemc package install [OPTIONS] [PACKAGE_ID] +cmemc package install [OPTIONS] [PACKAGE_ID] ``` - - - This command installs a package either from the marketplace or from local package archives (.cpa) or directories. - - ??? info "Options" ```text @@ -105,13 +89,9 @@ This command installs a package either from the marketplace or from local packag Uninstall installed packages. ```shell-session title="Usage" -$ cmemc package uninstall [OPTIONS] [PACKAGE_ID] +cmemc package uninstall [OPTIONS] [PACKAGE_ID] ``` - - - - ??? info "Options" ```text @@ -130,10 +110,6 @@ Export installed packages to package directories. $ cmemc package export [OPTIONS] [PACKAGE_ID] ``` - - - - ??? info "Options" ```text @@ -149,18 +125,13 @@ $ cmemc package export [OPTIONS] [PACKAGE_ID] Build a package archive from a package directory. ```shell-session title="Usage" -$ cmemc package build [OPTIONS] PACKAGE_DIRECTORY +cmemc package build [OPTIONS] PACKAGE_DIRECTORY ``` - - - This command processes a package directory, validates its content including the manifest, and creates a versioned Corporate Memory package archive (.cpa) with the following naming convention: {package_id}-v{version}.cpa Package archives can be published to the marketplace using the `package publish` command. - - ??? 
info "Options" ```text @@ -175,16 +146,11 @@ Package archives can be published to the marketplace using the `package publish` Publish a package archive to the marketplace server. ```shell-session title="Usage" -$ cmemc package publish [OPTIONS] PACKAGE_ARCHIVE +cmemc package publish [OPTIONS] PACKAGE_ARCHIVE ``` - - - - ??? info "Options" ```text --marketplace-url TEXT Alternative Marketplace URL. ``` - From 1d2d702adfe53f86f1ff2acfd549869d82f1fc16 Mon Sep 17 00:00:00 2001 From: Rene Pietzsch Date: Tue, 27 Jan 2026 17:16:46 +0100 Subject: [PATCH 13/17] rumdl auto-fixes --- docs/develop/marketplace-packages/index.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/develop/marketplace-packages/index.md b/docs/develop/marketplace-packages/index.md index e00af74b5..ca44753db 100644 --- a/docs/develop/marketplace-packages/index.md +++ b/docs/develop/marketplace-packages/index.md @@ -14,13 +14,13 @@ Starting with version 26.1, we support the creation and use of Marketplace Packa Marketplace Packages bundle everything for a specific Corporate Memory–based solution or project into a single shareable, managed artifact: -- Vocabularies / Ontologies -- (SKOS) Taxonomies -- (Instance / Data) Graphs -- Build Projects -- Dependencies on - - [python-plugins](../python-plugins/index.md) - - (other) Marketplace Packages +- Vocabularies / Ontologies +- (SKOS) Taxonomies +- (Instance / Data) Graphs +- Build Projects +- Dependencies on + - [python-plugins](../python-plugins/index.md) + - (other) Marketplace Packages The lifecycle of a Corporate Memory Marketplace Package is shown in the following flowchart. @@ -30,7 +30,7 @@ The following pages give an overview of this feature:
-- :material-download-circle-outline: [Installation and Management](installation/index.md) +- :material-download-circle-outline: [Installation and Management](installation/index.md) --- @@ -38,7 +38,7 @@ The following pages give an overview of this feature: This section discusses the lifecycle commands and stages `install`, `list` and `uninstall`. -- :material-code-json: [Development and Publication](development/index.md) +- :material-code-json: [Development and Publication](development/index.md) --- From d25b6845479f4b9a6fb047c875984562c5ccd907 Mon Sep 17 00:00:00 2001 From: Rene Pietzsch Date: Mon, 23 Feb 2026 15:18:00 +0100 Subject: [PATCH 14/17] add ``, order elements, CMEM-7248 --- .markdownlint.jsonc | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/.markdownlint.jsonc b/.markdownlint.jsonc index 6d99b01fb..525939c80 100644 --- a/.markdownlint.jsonc +++ b/.markdownlint.jsonc @@ -13,16 +13,17 @@ }, "MD033": { "allowed_elements": [ - "figure", - "figcaption", - "div", - "details", - "summary", "a", - "p", + "br", + "cite", + "details", + "div", + "figcaption", + "figure", "img", + "p", "span", - "br" + "summary" ] }, "MD035": false, From 4948af871576f1e3a1846d7b83f046bcbe8d10a3 Mon Sep 17 00:00:00 2001 From: Rene Pietzsch Date: Wed, 25 Feb 2026 18:22:00 +0100 Subject: [PATCH 15/17] apply auto-fixes --- .../features/application-settings.md | 8 ++--- .../graph-insights/features/canvas-ui.md | 7 ++-- .../graph-insights/features/category-tree.md | 10 +++--- .../graph-insights/features/connections.md | 16 +++++----- .../graph-insights/features/groups.md | 18 +++++------ .../graph-insights/features/index.md | 4 +-- .../graph-insights/features/objects-table.md | 6 ++-- .../graph-insights/features/objects.md | 27 ++++++++-------- .../graph-insights/features/persistence.md | 27 ++++++++-------- .../graph-exploration/graph-insights/index.md | 8 ++--- .../graph-insights/tutorial.md | 32 ++++++++++--------- 11 files changed, 83 
insertions(+), 80 deletions(-) diff --git a/docs/explore-and-author/graph-exploration/graph-insights/features/application-settings.md b/docs/explore-and-author/graph-exploration/graph-insights/features/application-settings.md index e5209d430..314d1922f 100644 --- a/docs/explore-and-author/graph-exploration/graph-insights/features/application-settings.md +++ b/docs/explore-and-author/graph-exploration/graph-insights/features/application-settings.md @@ -15,11 +15,11 @@ Access global configuration via the top-left dropdown menu. ### Troubleshoot and Reset -1. **Reset local data:** Clears browser-side preferences (e.g., columns visibility, width, and position). -2. **Reset graph data:** Performs a "Factory Reset" of the analysis for the current dataset. +1. **Reset local data:** Clears browser-side preferences (e.g., columns visibility, width, and position). +2. **Reset graph data:** Performs a "Factory Reset" of the analysis for the current dataset. !!! danger "Destructive Action" - + **Reset graph data** permanently deletes all [user-defined classes](persistence.md#custom-categories) and [saved explorations](persistence.md#saved-explorations) associated with the current database. This cannot be undone. @@ -42,4 +42,4 @@ When no group or connection is selected, the right pane displays the **database ![Database Summary Panel](../assets/database-summary.png){ class="bordered" width="30%" } -**Sorting:** Use the `Sort by` dropdown to order lists by `Name` (alphabetical) or `Quantity` (cardinality) to identify the largest entities. \ No newline at end of file +**Sorting:** Use the `Sort by` dropdown to order lists by `Name` (alphabetical) or `Quantity` (cardinality) to identify the largest entities. 
diff --git a/docs/explore-and-author/graph-exploration/graph-insights/features/canvas-ui.md b/docs/explore-and-author/graph-exploration/graph-insights/features/canvas-ui.md index b2e4b8377..cdda1653d 100644 --- a/docs/explore-and-author/graph-exploration/graph-insights/features/canvas-ui.md +++ b/docs/explore-and-author/graph-exploration/graph-insights/features/canvas-ui.md @@ -28,7 +28,7 @@ Graph Insights maintains a session history stack. ![Undo Redo Controls](../assets/undo-redo.png){ class="bordered" width="40%" } !!! warning "Rebuild on Undo" - + Some operations (e.g., [user-defined classes](persistence.md#custom-categories) operations) have persistent side effects that may require a rebuild of the exploration state when moving back in the history. @@ -48,6 +48,7 @@ If a group contains *more* resources than the limit, it is rendered as a single ### Instance Rendering (Count ≤ Threshold) If the count is *below* the limit, each resource is rendered as a dot within the group disk. + - The number inside each dot represents its [predecessor](objects.md#predecessors) count (number of resources in the left group linked by the connection). No number means there is only a single predecessor. - The resources with higher predecessor counts are located at the center of the group. @@ -65,8 +66,8 @@ Locate specific resources **among those currently visible** in the groups of the ![Search Visible Objects](../assets/search-objects.png){ class="bordered" width="65%" } !!! tip "Flags Management" - + Use the `Temporary flags` menu that appears on the top left of the canvas to manage the persistence of the temporary flags. 
---- \ No newline at end of file +--- diff --git a/docs/explore-and-author/graph-exploration/graph-insights/features/category-tree.md b/docs/explore-and-author/graph-exploration/graph-insights/features/category-tree.md index 3c13cfc1f..19122bc43 100644 --- a/docs/explore-and-author/graph-exploration/graph-insights/features/category-tree.md +++ b/docs/explore-and-author/graph-exploration/graph-insights/features/category-tree.md @@ -1,13 +1,13 @@ -# All Explorations Start from the Class Tree +# All Explorations Start from the Class Tree The left pane is the **starting point of any exploration**, whether you want to **begin a fresh exploration** with a group from a class of the taxonomy or **recall a pre-existing exploration**. ![Sidebar Overview](../assets/left-sidebar.png){ class="bordered" width="30%" } !!! warning "Further Information" - + This section focuses on the **class tree**. See the persistence page for **[saved explorations](persistence.md#saved-explorations)** and **[user-defined classes](persistence.md#custom-categories)**. @@ -25,7 +25,7 @@ The tree displays the hierarchical taxonomy of the dataset: The tree is the primary tool to populate the canvas: -- **Drag and drop (Start):** Dragging a class to an empty area clears the canvas and starts a new exploration with a group of the selected class. +- **Drag and drop (Start):** Dragging a class to an empty area clears the canvas and starts a new exploration with a group of the selected class. *Alternatives: Double-click the class or use its context menu item `Start exploration with class`.* - **Drag and drop (Intersect):** Drag a class onto an **existing** group to apply an intersection filter (e.g., dragging `German Suppliers` onto `VIP Suppliers` restricts the group to resources with **both classes**). @@ -41,12 +41,12 @@ The tree is the primary tool to populate the canvas: - **Outcome:** The item turns **light gray**. Expansion steps will skip this class and offer **its direct sub-class** instead. !!! 
danger "Caution" - + The resources that have **only deactivated classes** are **no longer reachable** via an exploration. !!! tip "Hiding Deactivated Items" - + You can toggle the visibility of deactivated items in the tree via the [application settings](application-settings.md#settings). diff --git a/docs/explore-and-author/graph-exploration/graph-insights/features/connections.md b/docs/explore-and-author/graph-exploration/graph-insights/features/connections.md index 26b4428bc..3d8e8b4b8 100644 --- a/docs/explore-and-author/graph-exploration/graph-insights/features/connections.md +++ b/docs/explore-and-author/graph-exploration/graph-insights/features/connections.md @@ -16,12 +16,12 @@ When a group with visible resources is expanded, or when a connected group direc ![Connectivity Indicator](../assets/objects-without-successor.png){ class="bordered" width="75%" } !!! info "Visual Partition" - + This indicator provides an instant "Visual Partition" of the set. For example, expanding `Supplier` → `Product` and filtering the products to `Confections` or `Beverages` dims all Suppliers who do not provide confections nor beverages. !!! tip "Missing Connections" - + This feature is useful to spot single resources that are missing expected connections (the expected connections missing at the group level can be identified in the expansion menu of the group). @@ -29,7 +29,7 @@ When a group with visible resources is expanded, or when a connected group direc ## Left Group Restriction (Backpropagation) {#backpropagation} -By default, the exploration applies a **left-join pattern**: The left group remains static, while the right group contains the connected resources. The left group restriction switches (permanently) to an **inner-join pattern**, removing from the left group all resources having no successor in the right group. +By default, the exploration applies a **left-join pattern**: The left group remains static, while the right group contains the connected resources. 
The left group restriction switches (permanently) to an **inner-join pattern**, removing from the left group all resources having no successor in the right group. - **Action:** Right-click the connection to open its context menu and select `Restrict left group`. - **Outcome:** @@ -39,14 +39,14 @@ By default, the exploration applies a **left-join pattern**: The left group rema ![Backpropagation Connection Context](../assets/backpropagation-connection.png){ class="bordered" width="75%" } !!! warning "Effects of Subsequent Updates" - + Because the switch to an **inner-join pattern** is permanent, any further filtering of the right group will modify the contents of the right group. In the example above, narrowing `Confections | Beverages` to `Beverages` only will remove from the `Supplier` group the ones who do not provide beverages. !!! hint "Multistep Backpropagation" - + To combine constraints from a variety of traversals conjunctively, restrictions are often propagated back to the start node of the exploration (see the [Supply Chain Analysis Tutorial](../tutorial.md#backpropagation)) @@ -68,8 +68,8 @@ While the **[table of a group](objects-table.md)** lists resources, the **connec To open it: -1. **Context Menu:** Right-click the connection and select `Show connection table`. -2. **Quick Action:** Click the connection to select it, then click the blue `Show connection table...` button at the bottom of the canvas. +1. **Context Menu:** Right-click the connection and select `Show connection table`. +2. **Quick Action:** Click the connection to select it, then click the blue `Show connection table...` button at the bottom of the canvas. ### Table Components @@ -85,4 +85,4 @@ The table displays one row per object property assertion with predefined and var - **Structural columns:** The resource and object property columns support **search** and **sorting**, but the filtering cannot be applied (filter the groups or select another object property instead). 
- **Data Property columns:** Full support with filtering like the [resource table](objects-table.md#filtering) -- **Highlighting:** Hovering over a row highlights the corresponding connected resources in the exploration if the groups are in [instance rendering mode](canvas-ui.md#object-threshold-logic). \ No newline at end of file +- **Highlighting:** Hovering over a row highlights the corresponding connected resources in the exploration if the groups are in [instance rendering mode](canvas-ui.md#object-threshold-logic). diff --git a/docs/explore-and-author/graph-exploration/graph-insights/features/groups.md b/docs/explore-and-author/graph-exploration/graph-insights/features/groups.md index 712b8989d..8a300870b 100644 --- a/docs/explore-and-author/graph-exploration/graph-insights/features/groups.md +++ b/docs/explore-and-author/graph-exploration/graph-insights/features/groups.md @@ -15,7 +15,7 @@ Selecting a group opens the **group details** in the right pane. The header contains: - The **group caption** that is either: - - The caption of its class if no class filter was applied + - The caption of its class if no class filter was applied - A "logical expression" of class captions describing the combination of class filter applied otherwise (see [histogram filtering](#filtering)) - A structured **natural language description** of its contents that details all steps of the traversal. @@ -24,7 +24,7 @@ The header contains: The **histogram sections** display the distribution of classes within a group. It allows analysts to scrutinize class composition (eg. spot unexpected or missing classes) and perform logical set operations (union, intersection, difference) to restrict the contents of the groups. - **Scope:** The panel displays counts for all **classes** (including **[user-defined classes](persistence.md#custom-categories)**) present within the selected group. 
- *Remark: If user-defined classes are present, a second histogram is shown for better overview* + *Remark: If user-defined classes are present, a second histogram is shown for better overview* - **Hierarchy:** Use the `(+)` icons to expand a class (e.g., `Confections`) to reveal the distribution of its sub-classes. - **Sorting:** Use the dropdown to order bars by `Name` (Alphabetical) or `Quantity` (Cardinality). @@ -41,7 +41,7 @@ Flagging highlights specific subsets of data for visual inspection without modif ![Flagged Resources in Group](../assets/histogram-flagged-resources.png){ class="bordered" width="80%" } !!! warning "Visibility Threshold" - + Flagging is only visible when the group is in **instance rendering mode** (count <= threshold). See the [threshold setting](canvas-ui.md#object-threshold-logic). @@ -59,7 +59,7 @@ Histograms function as a visual facet query builder. Use the buttons to apply bo | **Intersection (AND)** | **Sequential** Operation | 1. Restrict to `Class A`.
2. Select `Class B` in the updated list.
3. Restrict to `Class B`.
**Outcome:** Items that are **BOTH** A and B. | !!! warning "Multiple Histograms" - + When applying a filter, selections made **simultaneously in different histograms** are combined **conjunctively (AND)** in a facet-like logic. @@ -74,14 +74,14 @@ As filters are applied, the **group caption** updates to reflect the mathematica The **expansion menu** of a group presents the options available for adding a non-empty step to the traversal. It tells you which classes are reachable via which direction of which object properties. -1. **Action:** Click any group to open the menu. -2. **Select:** The menu presents the hierarchy of connecting (directed) **object properties**, organized by target **class**. +1. **Action:** Click any group to open the menu. +2. **Select:** The menu presents the hierarchy of connecting (directed) **object properties**, organized by target **class**. - If the data model includes sub-object properties, point the parent object property to open the sub-menu with its child object property.* -3. **Action:** Click a **object property** with the required direction for the selected target **class** to add a new connection beam to the exploration. +3. **Action:** Click a **object property** with the required direction for the selected target **class** to add a new connection beam to the exploration. ![Expansion Menu](../assets/expansion-menu.png){ class="bordered" width="65%" } -4. **Alternative:** For schemas with deep object property hierarchies (or if you know exactly which object property) use the search bar to locate specific paths. +1. **Alternative:** For schemas with deep object property hierarchies (or if you know exactly which object property) use the search bar to locate specific paths. ![Filtered Expansion Menu](../assets/expansion-menu-flattened.png){ class="bordered" width="65%" } @@ -97,6 +97,6 @@ Right-click a group or its caption to access its context menu. 
- **Show resource table:** Opens the tabular view (see the [dedicated page](objects-table.md)). !!! tip "Usage" - + - The query is intended to be used directly on the database for various datasets with the same model (or slight variations thereof). - Use the notes to explain what is the content of the group in your own words. This is particularly relevant for collaborative work. diff --git a/docs/explore-and-author/graph-exploration/graph-insights/features/index.md b/docs/explore-and-author/graph-exploration/graph-insights/features/index.md index d0f65e979..75ed17eb7 100644 --- a/docs/explore-and-author/graph-exploration/graph-insights/features/index.md +++ b/docs/explore-and-author/graph-exploration/graph-insights/features/index.md @@ -6,7 +6,7 @@ Documentation for the core components of the Graph Insights interface. ## Workspace -- **[Application and dataset settings](application-settings.md):** Application fine-tuning, dataset selection. +- **[Application and dataset settings](application-settings.md):** Application fine-tuning, dataset selection. - **[Exploration canvas](canvas-ui.md):** Layout controls, information density management, and exploration tree exports. - **[Class tree](category-tree.md):** Starting an exploration, class configuration (captions/visibility). @@ -20,4 +20,4 @@ Documentation for the core components of the Graph Insights interface. ## Analysis Tools - **[Histograms](groups.md#histograms):** class distribution analysis and facet-like set operations (union, intersection, difference). -- **[Persistence](persistence.md):** Saved explorations, user-defined classes, and JSON sharing. \ No newline at end of file +- **[Persistence](persistence.md):** Saved explorations, user-defined classes, and JSON sharing. 
diff --git a/docs/explore-and-author/graph-exploration/graph-insights/features/objects-table.md b/docs/explore-and-author/graph-exploration/graph-insights/features/objects-table.md index db376cc91..6b26cfbba 100644 --- a/docs/explore-and-author/graph-exploration/graph-insights/features/objects-table.md +++ b/docs/explore-and-author/graph-exploration/graph-insights/features/objects-table.md @@ -32,10 +32,10 @@ The resource table is the engine for refining the exploration tree. Apply filter Filtering is a two-step process to prevent unnecessary database queries. -1. **Define and preview:** Set criteria in the column headers. +1. **Define and preview:** Set criteria in the column headers. - **Outcome:** The table updates immediately to preview the result, and the column header displays an **hourglass icon (⧖)** (symbolizes filtering). - Repeat with further columns as necessary. -2. **Apply:** Click `Apply filters` (top-left). +2. **Apply:** Click `Apply filters` (top-left). - **Outcome:** The group on the canvas updates, and a **half full disk icon (◐)** appears on the group (indicating filtered content). ![Filter Status Icons](../assets/filtered-group-and-column.png){ class="bordered" width="85%" } @@ -74,7 +74,7 @@ Unlike the bulk group export, table exports respect the **current view** (filter - **Copy SPARQL:** Copies the query used to generate the current table view. !!! tip "Remarks" - + - The query is intended to be used directly on the database for various datasets with the same model (or slight variations thereof). - The table query was designed for dashboard integration. Make sure to configure the table to match exactly your requirements. - The query might look overcomplicated but it includes lots of special case handling to replicate the table view as close as possible. 
diff --git a/docs/explore-and-author/graph-exploration/graph-insights/features/objects.md b/docs/explore-and-author/graph-exploration/graph-insights/features/objects.md index 79f81846b..086c143ad 100644 --- a/docs/explore-and-author/graph-exploration/graph-insights/features/objects.md +++ b/docs/explore-and-author/graph-exploration/graph-insights/features/objects.md @@ -2,7 +2,7 @@ # Resources -Most investigations require **isolating individual resources** at some point for closer inspection. This page covers the **resource details pane**, the **highlighting and flagging tools**, and **single resource traversals**. +Most investigations require **isolating individual resources** at some point for closer inspection. This page covers the **resource details pane**, the **highlighting and flagging tools**, and **single resource traversals**. --- @@ -14,16 +14,17 @@ Select a visible resource in a group or its row in the group table to open the ` ### Components -1. **Header:** Displays the **caption** and **description** , as well as the IRI of the resource . -2. **Classes:** List of all classes assigned to the resource. -3. **Data Properties:** List of all data properties organized in sections by their "usage domain" classes (see below). +1. **Header:** Displays the **caption** and **description** , as well as the IRI of the resource . +2. **Classes:** List of all classes assigned to the resource. +3. **Data Properties:** List of all data properties organized in sections by their "usage domain" classes (see below). ### Controls + - **Hide empty fields:** Toggles visibility of data properties that have no value for this specific resource. - **Copy Data:** Right-click any row to copy the **data property** or **data property assertion**. !!! info "Why are there sections?" - + - Graph Insights analyses the actual graph data and infers at which level of the class hierarchy each data properties is effectively used. 
Besides giving some insights about the real usage domain of data properties, it is used to prevent the [resources table](objects-table.md) of the higher level classes from being "polluted" by all specific data properties of all their sub-classes. - If a data property is "defined" by multiple classes (e.g., `rdfs:label` used by both `Clothing` and `SeasonalCollection`), Graph Insights groups them into a shared section (e.g., `Clothing & SeasonalCollection`) to avoid duplication. @@ -46,7 +47,7 @@ The standard flags are persistent markers used to track the appearance of specif ![Object Flag and Note](../assets/object-flag-and-note.png){ class="bordered" width="60%" } !!! info "About Persistence" - + - A flag will appear anytime a new instance of the flagged resource is visible in a subsequent exploration step (even in new explorations). - Flags and notes can be removed using the context-menu of the resource, the group, or the exploration (canvas). @@ -64,7 +65,7 @@ Most flagging actions that might affect numerous nodes create temporary flags fo ![Temporary Flags Dropdown](../assets/temporary-vs-permanent-flags.png){ class="bordered" width="80%" } !!! tip "Useful Tip" - + Add a standard flag to a temporarily flagged resource you identified via a global search and clear the temporary flags for a cleaner exploration. @@ -83,13 +84,13 @@ Visualize upstream and downstream connections of a resource in the exploration t **Flagging:** -- **Action:** Right-click and select `Flag all connected resources...` +- **Action:** Right-click and select `Flag all connected resources...` - **Outcome:** [Temporary flags](#temporary-flags) are added to all connected resources visible. ![Branch Highlighting](../assets/flag-predecessors-successors.png){ class="bordered" width="95%" } !!! warning "Line of Sight" - + Path highlighting is blocked by **count-only groups** (count > threshold). 
@@ -97,8 +98,8 @@ Visualize upstream and downstream connections of a resource in the exploration t ## Connectivity Visualization (Predecessors) {#predecessors} -In the **groups in instance rendering mode** (count <= threshold), Graph Insights visually encodes the local "centrality" of resources based on their connectivity to the previous group in the exploration tree (if any). - +In the **groups in instance rendering mode** (count <= threshold), Graph Insights visually encodes the local "centrality" of resources based on their connectivity to the previous group in the exploration tree (if any). + - **Definition:** The **number of predecessors** of a resource is the number of distinct resources from the *previous* group in the current branch of the exploration tree connected to it. - **Visualization:** - The number of predecessors is displayed in the dot representing the resources, no number meaning there is only a single predecessor. @@ -113,7 +114,7 @@ In the **groups in instance rendering mode** (count <= threshold), Graph Insight Analysts can focus on a specific resource within a group to analyze its relationships. !!! warning "Prerequisite" - + - These actions require the group containing the object to be in **instance rendering mode** (resources visible). - If it is not the case, consider reducing the group size by filtering or directly selecting the resource from the resources table of the group. - Locate the resource via [search](canvas-ui.md#global-search) or the [resource table](objects-table.md). @@ -129,7 +130,7 @@ Expand the exploration with specific connections from a single resource within a ![Expand Individual Object](../assets/expand-object-from-group.png){ class="bordered" width="75%" } !!! info "Independence of the Explorations" - + The resource exploration does not depend on the exploration that leads to the group. 
Any change to the main exploration either preserves the resource exploration (if the resource is still visible) or drops it (if the resource is no longer visible). diff --git a/docs/explore-and-author/graph-exploration/graph-insights/features/persistence.md b/docs/explore-and-author/graph-exploration/graph-insights/features/persistence.md index 788df6e2a..512ff5c83 100644 --- a/docs/explore-and-author/graph-exploration/graph-insights/features/persistence.md +++ b/docs/explore-and-author/graph-exploration/graph-insights/features/persistence.md @@ -5,7 +5,7 @@ Graph Insights provides mechanisms to persist analysis logic, annotate findings, and extend the class taxonomy. This page covers **saving and sharing explorations**, **group annotations**, and **user-defined classes**. !!! info "Dynamic Persistence" - + Saved items preserve the **query logic** (the sequence of expansions and filters), not a static snapshot of data. If the underlying dataset changes, loading a saved exploration will reflect the current state of the database. @@ -18,14 +18,14 @@ A **saved exploration** preserves the **structure** of the analysis: classes of To save the current exploration, either right-click the canvas background to open the exploration context menu or open the `Exploration` top menu, then select `Save exploration`. !!! warning "Structure over Layout" - + The **structure** is not the exact **visual layout** (x/y coordinates). When loaded, the exploration tree re-arranges itself for optimal readability, adapting the rendering to the possibly modified number of resources in the groups. ### Saved Exploration Folders The saved explorations folders behave like a usual file system: - + - Use the **context menu of the folders** in the saved explorations section of the left pane **to create new subfolders**. - New saved explorations are placed **in the folder highlighted at the time of the operation.** - **Names** must only be **unique within their folder**. 
@@ -39,7 +39,7 @@ If you load and modify a loaded exploration (e.g., add a filter), Graph Insights ### Annotations (Group Notes) -Add context to the analysis by attaching text notes to important groups. +Add context to the analysis by attaching text notes to important groups. - **Add note:** Right-click a group to open its context menu and select `Add note`. - **Toggle visibility:** Use the group context menu to toggle the visibility of a single note, or use the context menu of the canvas (or the `Exploration` top menu) and select `Hide/Show all group notes`. @@ -47,12 +47,12 @@ Add context to the analysis by attaching text notes to important groups. ![Saved Exploration Context Menu](../assets/save-exploration-context-menu-and-notes.png){ class="bordered" width="75%" } !!! tip "Notes as Documentation" - + The notes are persisted in saved explorations to allow documentation for future usage. --- - + ## User-Defined Classes {#custom-categories} **User-defined Graph Insights classes** are customizable tools designed to analyze data with specific goals (e.g. identifying high-risk suppliers based at a given location and having a specific contract type). @@ -63,7 +63,7 @@ They act as **"virtual classes"**, refining the model within the exploration lay A user-defined Graph Insights class can be defined for any group in any exploration (e.g., "High-volume suppliers located in Germany"). - **Action:** Right-click the group or its caption to open its context menu and select `Add Graph Insights class for this group`. -- **Outcome:** +- **Outcome:** - The group is relabeled with the name of the new class. - The new class is **added to the class tree** and the Graph Insights classes list in the right pane, and is handled like a standard class. @@ -71,13 +71,13 @@ A user-defined Graph Insights class can be defined for any group in any explorat !!! info "Non-Destructive Model Refinement" - + User-defined classes are **non-destructive**. 
They exist only in the visualization layer and do **not** write changes back to the RDF store. !!! tip "Hints" - + - User-defined classes also appear in an additional specific Graph Insights class histogram for improved visibility (e.g. it allows you to see at first glance how many "High-Risk Suppliers" exist within a larger "Supplier" group). - You can create a user-defined class based on *another* user-defined class. @@ -89,12 +89,12 @@ A user-defined Graph Insights class can be defined for any group in any explorat Share an exploration serialized as a JSON string. -1. **Export:** Right-click the canvas to open the exploration context menu or use the `Exploration` top menu, then select `Copy exploration as JSON`. -2. **Share:** Send the text blob to another user. -3. **Import:** The recipient selects `Start exploration from JSON` from the canvas context menu or the `Exploration` top menu, and pastes the string. +1. **Export:** Right-click the canvas to open the exploration context menu or use the `Exploration` top menu, then select `Copy exploration as JSON`. +2. **Share:** Send the text blob to another user. +3. **Import:** The recipient selects `Start exploration from JSON` from the canvas context menu or the `Exploration` top menu, and pastes the string. !!! warning "Limitations" - + The success of exploration sharing on distinct servers depends greatly on the similarity between the source and target dataset and the specificity of the exploration. Sharing an exploration is in fact just like sharing multiple complex queries. @@ -108,4 +108,3 @@ Export the current canvas state for reporting. - **Formats:** - **Raster (PNG):** For slides and quick sharing. - **Vector (SVG):** For high-resolution print or editing. 
- diff --git a/docs/explore-and-author/graph-exploration/graph-insights/index.md b/docs/explore-and-author/graph-exploration/graph-insights/index.md index b3d53490e..889738f04 100644 --- a/docs/explore-and-author/graph-exploration/graph-insights/index.md +++ b/docs/explore-and-author/graph-exploration/graph-insights/index.md @@ -50,13 +50,13 @@ The **user guide** presents the visual analysis workflow of Graph Insights. It d ### Where to Start? -1. **[Core Concepts](concepts.md):** *(Recommended)* Before diving in, understand the **aggregated exploration tree** paradigm of Graph Insights that prevents the visual clutter of traditional force-directed graph renderings ("hairballs"). +1. **[Core Concepts](concepts.md):** *(Recommended)* Before diving in, understand the **aggregated exploration tree** paradigm of Graph Insights that prevents the visual clutter of traditional force-directed graph renderings ("hairballs"). -2. **[Tutorial: Supply Chain Analysis](tutorial.md):** A step-by-step walkthrough using the Northwind dataset. Learn how to filter for **VIP customers**, analyze **product vulnerabilities**, and use **backpropagation** to propagate filtering. +2. **[Tutorial: Supply Chain Analysis](tutorial.md):** A step-by-step walkthrough using the Northwind dataset. Learn how to filter for **VIP customers**, analyze **product vulnerabilities**, and use **backpropagation** to propagate filtering. -3. **[Feature Reference](features/index.md):** Detailed documentation for every component of the interface. +3. **[Feature Reference](features/index.md):** Detailed documentation for every component of the interface. - **Workspace:** [Canvas](features/canvas-ui.md), [Search](features/category-tree.md), and [Settings](features/application-settings.md#settings). - **Interaction:** [Groups](features/groups.md), [Connections](features/connections.md), and [Details](features/objects.md#object-details). 
- **Analysis:** [Histograms](features/groups.md#histograms) and [Persistence](features/persistence.md). -4. **[Platform Integration](navigating-to-cmem.md):** Learn how to seamlessly jump from a graph node to other Corporate Memory views. +4. **[Platform Integration](navigating-to-cmem.md):** Learn how to seamlessly jump from a graph node to other Corporate Memory views. diff --git a/docs/explore-and-author/graph-exploration/graph-insights/tutorial.md b/docs/explore-and-author/graph-exploration/graph-insights/tutorial.md index 283cbcc52..3793cd1c1 100644 --- a/docs/explore-and-author/graph-exploration/graph-insights/tutorial.md +++ b/docs/explore-and-author/graph-exploration/graph-insights/tutorial.md @@ -23,9 +23,9 @@ We will traverse the ordering chain, identify high-volume US partners, and isola Every exploration requires a starting point. We begin with the central entity: The **product**. -1. Locate the `Product` **class** in the [class tree](features/category-tree.md) in the left pane. -2. Drag it to the center of the canvas. -3. **Outcome:** A group appears, representing the entire inventory (e.g., 77 Products). +1. Locate the `Product` **class** in the [class tree](features/category-tree.md) in the left pane. +2. Drag it to the center of the canvas. +3. **Outcome:** A group appears, representing the entire inventory (e.g., 77 Products). ![Start with Product Category](assets/tutorial-start-with-product.png){ class="bordered" width="80%" } @@ -38,6 +38,7 @@ Next, we expand the exploration tree to connect Suppliers and Customers. ### Preparation To simplify the expansion menus, hide generic classes that are not relevant to this analysis. + - Right-click the `Agent` class in the class tree. - Select `Deactivate class`. @@ -45,11 +46,11 @@ To simplify the expansion menus, hide generic classes that are not relevant to t We extend the exploration tree from the central `Product` group. -1. **Add suppliers:** +1. **Add suppliers:** - Select the `Product` group. 
- Expand to the `Supplier` class via `→ hasSupplier`. -2. **Add customers:** +2. **Add customers:** - Select the `Product` group again. - Expand to the `Order` class via `← includesProduct`. - From the new `Order` group, expand to the `Customer` class via `→ hasCustomer`. @@ -63,23 +64,24 @@ We extend the exploration tree from the central `Product` group. The exploration tree currently visualizes all connections. We must now isolate the high-value targets (VIPs) and the specific risk region (UK). ### 3.1. Identify VIP US Customers + We define "VIP" as customers with the highest volume of incoming orders. -1. Select the `Customer` group and open the [resource table](features/objects-table.md). -2. **Filter by country:** Open the menu of the `country` column from its header (using the menu dropdown or a right-click) and enter `USA` in the text field of the filter submenu. -3. **Identify VIPs:** Click the **Predecessors** column header to **Sort Descending**. This ranks customers by their incoming connection count (number of Orders). +1. Select the `Customer` group and open the [resource table](features/objects-table.md). +2. **Filter by country:** Open the menu of the `country` column from its header (using the menu dropdown or a right-click) and enter `USA` in the text field of the filter submenu. +3. **Identify VIPs:** Click the **Predecessors** column header to **Sort Descending**. This ranks customers by their incoming connection count (number of Orders). ![USA Customer Filter](assets/tutorial-usa-customer-filter.png){ class="bordered" width="80%" } -4. **Select and restrict:** Check the boxes for the top 5 customers and click `Restrict to selection`. +1. **Select and restrict:** Check the boxes for the top 5 customers and click `Restrict to selection`. ![Top USA Customer Filter](assets/tutorial-top-usa-customer-filter.png){ class="bordered" width="85%" } ### 3.2. Isolate UK Suppliers -1. Select the `Supplier` group to open the table. -2. 
Filter `Country` to `UK`. -3. Click `Apply filters`. +1. Select the `Supplier` group to open the table. +2. Filter `Country` to `UK`. +3. Click `Apply filters`. ![UK Supplier Filter](assets/tutorial-uk-suppliers-filter.png){ class="bordered" width="85%" } @@ -132,18 +134,18 @@ We have isolated the high risk product group, that we can now analyze: Finally, we convert this visual insight into an actionable list for the procurement team. -1. **Drill down:** +1. **Drill down:** - In the histogram, right-click the `Confections` bar. - Select `Restrict group to class`. This discards the other product types, leaving only the high-risk confections, with the side effect of reducing the UK suppliers to a single resource. -2. **Visualize traversals including a specific resource:** +2. **Visualize traversals including a specific resource:** - Open the resource table of the `Products` group. - Right-click a specific high-value item (e.g., "Teatime Chocolate Biscuits") to open its context menu. - Select `Flag all connected resources on the branch`. ![Supply Chain: Flag Branch](assets/tutorial-supply-chain-flag-branch-menu.png){ class="bordered" width="85%" } - - Graph Insights highlights all resources on a specific high-risk traversal: The specific UK Supplier → The specific Biscuit → The specific VIP US Customers buying it. +- Graph Insights highlights all resources on a specific high-risk traversal: The specific UK Supplier → The specific Biscuit → The specific VIP US Customers buying it. 
![Supply Chain: Flagged Branch](assets/tutorial-supply-chain-flagged-branch.png){ class="bordered" width="85%" } From 70a9f136734c67b8d8c8f370f5134c5ae0904788 Mon Sep 17 00:00:00 2001 From: Rene Pietzsch Date: Fri, 27 Feb 2026 13:51:53 +0100 Subject: [PATCH 16/17] baseline apply auto fixes applied --- .../graph-exploration/graph-insights/concepts.md | 2 +- .../graph-insights/features/category-tree.md | 2 +- .../graph-exploration/graph-insights/features/groups.md | 2 +- .../graph-exploration/graph-insights/features/objects.md | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/explore-and-author/graph-exploration/graph-insights/concepts.md b/docs/explore-and-author/graph-exploration/graph-insights/concepts.md index f82ea4182..329270e8e 100644 --- a/docs/explore-and-author/graph-exploration/graph-insights/concepts.md +++ b/docs/explore-and-author/graph-exploration/graph-insights/concepts.md @@ -100,4 +100,4 @@ Narrow down any group to match your current focus using [data property filters]( - **Images:** [Export](features/persistence.md#export-visuals) the canvas as SVG or PNG images. - **CSV data:** Download of the resources of a group or of the table view of a group or connection. - **Query snippets:** Copy the SPARQL that returns the resources of a group or the data in the table view of a group or connection for use (e.g. a dashboard). -- **[JSON Sharing](features/persistence.md#json-sharing):** Share explorations as JSON-serialization. \ No newline at end of file +- **[JSON Sharing](features/persistence.md#json-sharing):** Share explorations as JSON-serialization. 
diff --git a/docs/explore-and-author/graph-exploration/graph-insights/features/category-tree.md b/docs/explore-and-author/graph-exploration/graph-insights/features/category-tree.md index 2642bce53..19122bc43 100644 --- a/docs/explore-and-author/graph-exploration/graph-insights/features/category-tree.md +++ b/docs/explore-and-author/graph-exploration/graph-insights/features/category-tree.md @@ -54,4 +54,4 @@ The tree is the primary tool to populate the canvas: ## Class Tree Configuration Reset -To return the class tree configuration to its initial state, click the **circular arrow icon** in the pane header. This action restores all deactivated classes and reverts caption settings to defaults. \ No newline at end of file +To return the class tree configuration to its initial state, click the **circular arrow icon** in the pane header. This action restores all deactivated classes and reverts caption settings to defaults. diff --git a/docs/explore-and-author/graph-exploration/graph-insights/features/groups.md b/docs/explore-and-author/graph-exploration/graph-insights/features/groups.md index c0feeac8b..8e2b718a1 100644 --- a/docs/explore-and-author/graph-exploration/graph-insights/features/groups.md +++ b/docs/explore-and-author/graph-exploration/graph-insights/features/groups.md @@ -77,7 +77,7 @@ The **expansion menu** of a group presents the options available for adding a no 1. **Action:** Click any group to open the menu. 2. **Select:** The menu presents the hierarchy of connecting (directed) **object properties**, organized by target **class**. - If the data model includes sub-object properties, point the parent object property to open the sub-menu with its child object property.* -3. **Action:** Click an object property with the required direction for the selected target **class** to add a new connection beam to the exploration. +3. 
**Action:** Click an object property with the required direction for the selected target **class** to add a new connection beam to the exploration. ![Expansion Menu](../assets/expansion-menu.png){ class="bordered" width="65%" } diff --git a/docs/explore-and-author/graph-exploration/graph-insights/features/objects.md b/docs/explore-and-author/graph-exploration/graph-insights/features/objects.md index 9a8ecb4b5..5e3c46ffc 100644 --- a/docs/explore-and-author/graph-exploration/graph-insights/features/objects.md +++ b/docs/explore-and-author/graph-exploration/graph-insights/features/objects.md @@ -14,9 +14,9 @@ Select a visible resource in a group or its row in the group table to open the ` ### Components -1. **Header:** Displays the **caption** and **description** , as well as the IRI of the resource. -2. **Classes:** List of all classes assigned to the resource. -3. **Data Properties:** List of all data properties organized in sections by their "usage domain" classes (see below). +1. **Header:** Displays the **caption** and **description** , as well as the IRI of the resource. +2. **Classes:** List of all classes assigned to the resource. +3. **Data Properties:** List of all data properties organized in sections by their "usage domain" classes (see below). ### Controls From 08bd7b57265109eb9442b0dee44db4a619c5ea0e Mon Sep 17 00:00:00 2001 From: Rene Pietzsch Date: Sat, 7 Mar 2026 11:16:56 +0100 Subject: [PATCH 17/17] fix `rumdl` issues in gi sections. 
--- .../graph-exploration/graph-insights/features/groups.md | 2 +- .../graph-exploration/graph-insights/navigating-to-cmem.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/explore-and-author/graph-exploration/graph-insights/features/groups.md b/docs/explore-and-author/graph-exploration/graph-insights/features/groups.md index 8e2b718a1..317b8a304 100644 --- a/docs/explore-and-author/graph-exploration/graph-insights/features/groups.md +++ b/docs/explore-and-author/graph-exploration/graph-insights/features/groups.md @@ -24,7 +24,7 @@ The header contains: The **histogram sections** display the distribution of classes within a group. It allows analysts to scrutinize class composition (eg. spot unexpected or missing classes) and perform logical set operations (union, intersection, difference) to restrict the contents of the groups. - **Scope:** The panel displays counts for all **classes** (including **[user-defined classes](persistence.md#custom-categories)**) present within the selected group. - *Remark: If user-defined classes are present, a second histogram is shown for better overview* + - *Remark:* If user-defined classes are present, a second histogram is shown for better overview - **Hierarchy:** Use the `(+)` icons to expand a class (e.g., `Confections`) to reveal the distribution of its sub-classes. - **Sorting:** Use the dropdown to order bars by `Name` (Alphabetical) or `Quantity` (Cardinality). 
diff --git a/docs/explore-and-author/graph-exploration/graph-insights/navigating-to-cmem.md b/docs/explore-and-author/graph-exploration/graph-insights/navigating-to-cmem.md index d090e94a8..7a9e019a0 100644 --- a/docs/explore-and-author/graph-exploration/graph-insights/navigating-to-cmem.md +++ b/docs/explore-and-author/graph-exploration/graph-insights/navigating-to-cmem.md @@ -28,14 +28,14 @@ If you isolate a specific resource of interest during your exploration, you can There are two ways to trigger this navigation: -**From the Canvas** +### From the Canvas - **Action:** Right-click a specific resource dot within a group to open its context menu. - **Select:** `View in CMEM`. ![View Resource in Corporate Memory from Canvas](graph-insights-view-in-cmem-resources.png){ class="bordered" width="75%" } -**From the Details Pane** +### From the Details Pane - **Action:** Select a resource on the canvas or in the resource table to load its details in the right pane. - **Select:** Click the `View in CMEM` button located at the top of the Details pane.