diff --git a/docs/Manifest.toml b/docs/Manifest.toml index c92cfab..17096b9 100644 --- a/docs/Manifest.toml +++ b/docs/Manifest.toml @@ -1,23 +1,18 @@ # This file is machine-generated - editing it directly is not advised -julia_version = "1.9.1" +julia_version = "1.10.2" manifest_format = "2.0" -project_hash = "fe9e085d7cf73c701b832b2accdcbe0e649f57e4" +project_hash = "c453af1ca55f49eb0e1ed431f834146ed12716b2" [[deps.ANSIColoredPrinters]] git-tree-sha1 = "574baf8110975760d391c710b6341da1afa48d8c" uuid = "a4c015fc-c6ff-483c-b24f-f7ea428134e9" version = "0.0.1" -[[deps.Adapt]] -deps = ["LinearAlgebra", "Requires"] -git-tree-sha1 = "76289dc51920fdc6e0013c872ba9551d54961c24" -uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" -version = "3.6.2" -weakdeps = ["StaticArrays"] - - [deps.Adapt.extensions] - AdaptStaticArraysExt = "StaticArrays" +[[deps.AbstractTrees]] +git-tree-sha1 = "2d9c9a55f9c93e8887ad391fbae72f8ef55e1177" +uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" +version = "0.4.5" [[deps.ArgTools]] uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" @@ -25,9 +20,9 @@ version = "1.1.1" [[deps.ArrayLayouts]] deps = ["FillArrays", "LinearAlgebra"] -git-tree-sha1 = "af2bfc397e20a96145fe5bb1df32235a4100ca7e" +git-tree-sha1 = "33207a8be6267bc389d0701e97a9bce6a4de68eb" uuid = "4c555306-a7a7-4459-81d9-ec55ddd5c99a" -version = "1.4.0" +version = "1.9.2" weakdeps = ["SparseArrays"] [deps.ArrayLayouts.extensions] @@ -41,9 +36,9 @@ uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" [[deps.BlockArrays]] deps = ["ArrayLayouts", "FillArrays", "LinearAlgebra"] -git-tree-sha1 = "54cd829dd26330c42e1cf9df68470dd4df602c61" +git-tree-sha1 = "9a9610fbe5779636f75229e423e367124034af41" uuid = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" -version = "0.16.38" +version = "0.16.43" [[deps.CEnum]] git-tree-sha1 = "eb4cb44a499229b3b8426dcfb5dd85333951ff90" @@ -52,15 +47,15 @@ version = "0.4.2" [[deps.CircularArrays]] deps = ["OffsetArrays"] -git-tree-sha1 = "61bc114e595167090b4cbcb7305ddeacd4274f16" +git-tree-sha1 = "e24a6f390e5563583bb4315c73035b5b3f3e7ab4" uuid = "7a955b69-7140-5f4e-a0ed-f168c5e2e749" -version = "1.3.2" +version = "1.4.0" [[deps.CodecZlib]] deps = ["TranscodingStreams", "Zlib_jll"] -git-tree-sha1 = "02aa26a4cf76381be7f66e020a3eddeb27b0a092" +git-tree-sha1 = "59939d8a997469ee05c4b4944560a820f9ba0d73" uuid = "944b1d66-785c-5afd-91f1-9de20f533193" -version = "0.7.2" +version = "0.7.4" [[deps.CommonSubexpressions]] deps = ["MacroTools", "Test"] @@ -71,7 +66,7 @@ version = "0.3.0" [[deps.CompilerSupportLibraries_jll]] deps = ["Artifacts", "Libdl"] uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" -version = "1.0.2+0" +version = "1.1.0+0" [[deps.Dates]] deps = ["Printf"] @@ -91,14 +86,18 @@ version = "1.15.1" [[deps.Distances]] deps = ["LinearAlgebra", "Statistics", "StatsAPI"] -git-tree-sha1 = "b6def76ffad15143924a2199f72a5cd883a2e8a9" +git-tree-sha1 = "66c4c81f259586e8f002eacebc177e1fb06363b0" uuid = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7" -version = "0.10.9" -weakdeps = ["SparseArrays"] +version = "0.10.11" [deps.Distances.extensions] + DistancesChainRulesCoreExt = "ChainRulesCore" DistancesSparseArraysExt = "SparseArrays" + [deps.Distances.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" + [[deps.Distributed]] deps = ["Random", "Serialization", "Sockets"] uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" @@ -110,10 +109,10 @@ uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" version = "0.9.3" [[deps.Documenter]] -deps = ["ANSIColoredPrinters", "Base64", 
"Dates", "DocStringExtensions", "IOCapture", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "REPL", "Test", "Unicode"] -git-tree-sha1 = "39fd748a73dce4c05a9655475e437170d8fb1b67" +deps = ["ANSIColoredPrinters", "AbstractTrees", "Base64", "CodecZlib", "Dates", "DocStringExtensions", "Downloads", "Git", "IOCapture", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "MarkdownAST", "Pkg", "PrecompileTools", "REPL", "RegistryInstances", "SHA", "TOML", "Test", "Unicode"] +git-tree-sha1 = "5461b2a67beb9089980e2f8f25145186b6d34f91" uuid = "e30172f5-a6a5-5a46-863b-614d45cd2de4" -version = "0.27.25" +version = "1.4.1" [[deps.Downloads]] deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"] @@ -125,6 +124,12 @@ git-tree-sha1 = "bdb1942cd4c45e3c678fd11569d5cccd80976237" uuid = "4e289a0a-7415-4d19-859d-a7e5c4648b56" version = "1.0.4" +[[deps.Expat_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "1c6317308b9dc757616f0b5cb379db10494443a7" +uuid = "2e619515-83b5-522b-bb60-26c02a35a201" +version = "2.6.2+0" + [[deps.ExprTools]] git-tree-sha1 = "27415f162e6028e81c72b82ef756bf321213b6ec" uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04" @@ -132,7 +137,7 @@ version = "0.1.10" [[deps.Ferrite]] deps = ["EnumX", "LinearAlgebra", "NearestNeighbors", "Preferences", "Reexport", "SparseArrays", "StaticArrays", "Tensors", "WriteVTK"] -git-tree-sha1 = "31fdba40e9e01ed36013fb4eb8b8ff9f4ee68173" +git-tree-sha1 = "5b42946f1e77c19dd55fb18239ac0fd63cac6e1c" repo-rev = "master" repo-url = "https://github.com/Ferrite-FEM/Ferrite.jl.git" uuid = "c061ca5d-56c9-439f-9c0e-210fe06d3992" @@ -159,16 +164,21 @@ weakdeps = ["HYPRE", "Metis", "PartitionedArrays"] uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" [[deps.FillArrays]] -deps = ["LinearAlgebra", "Random"] -git-tree-sha1 = "a20eaa3ad64254c61eeb5f230d9306e937405434" +deps = ["LinearAlgebra"] +git-tree-sha1 = "0653c0a2396a6da5bc4766c43041ef5fd3efbe57" uuid = "1a297f60-69ca-5386-bcde-b61e274b549b" -version = "1.6.1" -weakdeps = ["SparseArrays", "Statistics"] +version = "1.11.0" [deps.FillArrays.extensions] + FillArraysPDMatsExt = "PDMats" FillArraysSparseArraysExt = "SparseArrays" FillArraysStatisticsExt = "Statistics" + [deps.FillArrays.weakdeps] + PDMats = "90014a1f-27ba-587c-ab20-58faa44d9150" + SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" + Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" + [[deps.ForwardDiff]] deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions"] git-tree-sha1 = "cf0fe81336da9fb90944683b8c41984b08793dad" @@ -179,6 +189,18 @@ weakdeps = ["StaticArrays"] [deps.ForwardDiff.extensions] ForwardDiffStaticArraysExt = "StaticArrays" +[[deps.Git]] +deps = ["Git_jll"] +git-tree-sha1 = "04eff47b1354d702c3a85e8ab23d539bb7d5957e" +uuid = "d7ba0133-e1db-5d97-8f8c-041e4b3a1eb2" +version = "1.3.1" + +[[deps.Git_jll]] +deps = ["Artifacts", "Expat_jll", "JLLWrappers", "LibCURL_jll", "Libdl", "Libiconv_jll", "OpenSSL_jll", "PCRE2_jll", "Zlib_jll"] +git-tree-sha1 = "d18fb8a1f3609361ebda9bf029b60fd0f120c809" +uuid = "f8c6e375-362e-5223-8a59-34ff63f689eb" +version = "2.44.0+2" + [[deps.HYPRE]] deps = ["CEnum", "HYPRE_jll", "Libdl", "MPI", "PartitionedArrays", "SparseArrays", "SparseMatricesCSR"] git-tree-sha1 = "9ef07430de39f36e07a74a90dccdb08630c48409" @@ -191,11 +213,17 @@ git-tree-sha1 = "b77d3eca75f8442e034ccf415c87405a49e77985" uuid = "0a602bbd-b08b-5d75-8d32-0de6eef44785" version = "2.23.1+1" 
+[[deps.Hwloc_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "ca0f6bf568b4bfc807e7537f081c81e35ceca114" +uuid = "e33a78d0-f292-5ffc-b300-72abe9b543c8" +version = "2.10.0+0" + [[deps.IOCapture]] deps = ["Logging", "Random"] -git-tree-sha1 = "d75853a0bdbfb1ac815478bacd89cd27b550ace6" +git-tree-sha1 = "8b72179abc660bfab5e28472e019392b97d0985c" uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89" -version = "0.2.3" +version = "0.2.4" [[deps.InteractiveUtils]] deps = ["Markdown"] @@ -208,9 +236,9 @@ version = "0.2.2" [[deps.IterativeSolvers]] deps = ["LinearAlgebra", "Printf", "Random", "RecipesBase", "SparseArrays"] -git-tree-sha1 = "1169632f425f79429f245113b775a0e3d121457c" +git-tree-sha1 = "59545b0a2b27208b0650df0a46b8e3019f85055b" uuid = "42fd0dbc-a981-5370-80f2-aaf504508153" -version = "0.9.2" +version = "0.9.4" [[deps.JLLWrappers]] deps = ["Artifacts", "Preferences"] @@ -230,6 +258,11 @@ git-tree-sha1 = "cc6ca91b2ca8d3600478d40097d30a6f046d0759" uuid = "51474c39-65e3-53ba-86ba-03b1b862ec14" version = "3.11.0+0" +[[deps.LazilyInitializedFields]] +git-tree-sha1 = "8f7f3cabab0fd1800699663533b6d5cb3fc0e612" +uuid = "0e77f7df-68c5-4e49-93ce-4cd80f5598bf" +version = "1.2.2" + [[deps.LazyArtifacts]] deps = ["Artifacts", "Pkg"] uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3" @@ -237,21 +270,26 @@ uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3" [[deps.LibCURL]] deps = ["LibCURL_jll", "MozillaCACerts_jll"] uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" -version = "0.6.3" +version = "0.6.4" [[deps.LibCURL_jll]] deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"] uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" -version = "7.84.0+0" +version = "8.4.0+0" [[deps.LibGit2]] -deps = ["Base64", "NetworkOptions", "Printf", "SHA"] +deps = ["Base64", "LibGit2_jll", "NetworkOptions", "Printf", "SHA"] uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" +[[deps.LibGit2_jll]] +deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll"] +uuid = "e37daf67-58a4-590a-8e99-b0245dd2ffc5" +version = "1.6.4+0" + [[deps.LibSSH2_jll]] deps = ["Artifacts", "Libdl", "MbedTLS_jll"] uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" -version = "1.10.2+0" +version = "1.11.0+1" [[deps.Libdl]] uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" @@ -264,9 +302,9 @@ version = "1.17.0+0" [[deps.LightXML]] deps = ["Libdl", "XML2_jll"] -git-tree-sha1 = "e129d9391168c677cd4800f5c0abb1ed8cb3794f" +git-tree-sha1 = "3a994404d3f6709610701c7dabfc03fed87a81f8" uuid = "9c8b4983-aa76-5018-a973-4c85ecc9e179" -version = "0.9.0" +version = "0.9.1" [[deps.LinearAlgebra]] deps = ["Libdl", "OpenBLAS_jll", "libblastrampoline_jll"] @@ -274,15 +312,15 @@ uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" [[deps.Literate]] deps = ["Base64", "IOCapture", "JSON", "REPL"] -git-tree-sha1 = "ae5703dde29228490f03cbd64c47be8131819485" +git-tree-sha1 = "596df2daea9c27da81eee63ef2cf101baf10c24c" uuid = "98b081ad-f1c9-55d3-8b20-4c87d4299306" -version = "2.15.0" +version = "2.18.0" [[deps.LogExpFunctions]] deps = ["DocStringExtensions", "IrrationalConstants", "LinearAlgebra"] -git-tree-sha1 = "7d6dd4e9212aebaeed356de34ccf262a3cd415aa" +git-tree-sha1 = "18144f3e9cbe9b15b070288eef858f71b291ce37" uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688" -version = "0.3.26" +version = "0.3.27" [deps.LogExpFunctions.extensions] LogExpFunctionsChainRulesCoreExt = "ChainRulesCore" @@ -305,9 +343,9 @@ version = "5.1.2+0" [[deps.MPI]] deps = ["Distributed", "DocStringExtensions", "Libdl", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", 
"OpenMPI_jll", "PkgVersion", "PrecompileTools", "Requires", "Serialization", "Sockets"] -git-tree-sha1 = "df53d0e1e0dbebf2315f4cd35e13e52ad43416c2" +git-tree-sha1 = "b4d8707e42b693720b54f0b3434abee6dd4d947a" uuid = "da04e1cc-30fd-572f-bb4f-1f8673147195" -version = "0.20.15" +version = "0.20.16" [deps.MPI.extensions] AMDGPUExt = "AMDGPU" @@ -318,43 +356,49 @@ version = "0.20.15" CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" [[deps.MPICH_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML"] -git-tree-sha1 = "8a5b4d2220377d1ece13f49438d71ad20cf1ba83" +deps = ["Artifacts", "CompilerSupportLibraries_jll", "Hwloc_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML"] +git-tree-sha1 = "4099bb6809ac109bfc17d521dad33763bcf026b7" uuid = "7cb0a576-ebde-5e09-9194-50597f1243b4" -version = "4.1.2+0" +version = "4.2.1+1" [[deps.MPIPreferences]] deps = ["Libdl", "Preferences"] -git-tree-sha1 = "781916a2ebf2841467cda03b6f1af43e23839d85" +git-tree-sha1 = "c105fe467859e7f6e9a852cb15cb4301126fac07" uuid = "3da0fdf6-3ccc-4f1b-acd9-58baa6c99267" -version = "0.1.9" +version = "0.1.11" [[deps.MPItrampoline_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML"] -git-tree-sha1 = "6979eccb6a9edbbb62681e158443e79ecc0d056a" +git-tree-sha1 = "ce0ca3dd147c43de175c5aff161315a424f4b8ac" uuid = "f1f71cc9-e9ae-5b93-9b94-4fe0e1ad3748" -version = "5.3.1+0" +version = "5.3.3+1" [[deps.MacroTools]] deps = ["Markdown", "Random"] -git-tree-sha1 = "9ee1618cbf5240e6d4e0371d6f24065083f60c48" +git-tree-sha1 = "2fa9ee3e63fd3a4f7a9a4f4744a52f4856de82df" uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" -version = "0.5.11" +version = "0.5.13" [[deps.Markdown]] deps = ["Base64"] uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" +[[deps.MarkdownAST]] +deps = ["AbstractTrees", "Markdown"] +git-tree-sha1 = "465a70f0fc7d443a00dcdc3267a497397b8a3899" +uuid = "d0879d2d-cac2-40c8-9cee-1863dc0c7391" +version = "0.1.2" + [[deps.MbedTLS_jll]] deps = ["Artifacts", "Libdl"] uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" -version = "2.28.2+0" +version = "2.28.2+1" [[deps.Metis]] deps = ["CEnum", "LinearAlgebra", "METIS_jll", "SparseArrays"] -git-tree-sha1 = "66a4f74edb3ac5f28c74de60f9acc2a541fbbe28" +git-tree-sha1 = "5582d3b0d794280c9b818ba56ce2b35b108aca41" uuid = "2679e427-3c69-5b7f-982b-ece356f1e94b" -version = "1.4.0" +version = "1.4.1" [deps.Metis.extensions] MetisGraphs = "Graphs" @@ -368,16 +412,16 @@ version = "1.4.0" [[deps.MicrosoftMPI_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "a8027af3d1743b3bfae34e54872359fdebb31422" +git-tree-sha1 = "f12a29c4400ba812841c6ace3f4efbb6dbb3ba01" uuid = "9237b28f-5490-5468-be7b-bb81f5f5e6cf" -version = "10.1.3+4" +version = "10.1.4+2" [[deps.Mmap]] uuid = "a63ad114-7e13-5084-954f-fe012c677804" [[deps.MozillaCACerts_jll]] uuid = "14a3606d-f60d-562e-9121-12d972cd8159" -version = "2022.10.11" +version = "2023.1.10" [[deps.NaNMath]] deps = ["OpenLibm_jll"] @@ -387,35 +431,46 @@ version = "1.0.2" [[deps.NearestNeighbors]] deps = ["Distances", "StaticArrays"] -git-tree-sha1 = "2c3726ceb3388917602169bed973dbc97f1b51a8" +git-tree-sha1 = "ded64ff6d4fdd1cb68dfcbb818c69e144a5b2e4c" uuid = "b8a86587-4115-5ab1-83bc-aa920d37bbce" -version = "0.4.13" +version = "0.4.16" [[deps.NetworkOptions]] uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" version = "1.2.0" [[deps.OffsetArrays]] -deps = ["Adapt"] -git-tree-sha1 = 
"2ac17d29c523ce1cd38e27785a7d23024853a4bb" +git-tree-sha1 = "e64b4f5ea6b7389f6f046d13d4896a8f9c1ba71e" uuid = "6fe1bfb0-de20-5000-8ca7-80f57d26f881" -version = "1.12.10" +version = "1.14.0" + + [deps.OffsetArrays.extensions] + OffsetArraysAdaptExt = "Adapt" + + [deps.OffsetArrays.weakdeps] + Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" [[deps.OpenBLAS_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" -version = "0.3.21+4" +version = "0.3.23+4" [[deps.OpenLibm_jll]] deps = ["Artifacts", "Libdl"] uuid = "05823500-19ac-5b8b-9628-191a04bc5112" -version = "0.8.1+0" +version = "0.8.1+2" [[deps.OpenMPI_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML"] -git-tree-sha1 = "f3080f4212a8ba2ceb10a34b938601b862094314" +deps = ["Artifacts", "CompilerSupportLibraries_jll", "Hwloc_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "PMIx_jll", "TOML", "Zlib_jll", "libevent_jll", "prrte_jll"] +git-tree-sha1 = "f46caf663e069027a06942d00dced37f1eb3d8ad" uuid = "fe0851c0-eecd-5654-98d4-656369965a5c" -version = "4.1.5+0" +version = "5.0.2+0" + +[[deps.OpenSSL_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "3da7367955dcc5c54c1ba4d402ccdc09a1a3e046" +uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95" +version = "3.0.13+1" [[deps.OpenSpecFun_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"] @@ -423,11 +478,22 @@ git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1" uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e" version = "0.5.5+0" +[[deps.PCRE2_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "efcefdf7-47ab-520b-bdef-62a2eaa19f15" +version = "10.42.0+1" + +[[deps.PMIx_jll]] +deps = ["Artifacts", "Hwloc_jll", "JLLWrappers", "Libdl", "Zlib_jll", "libevent_jll"] +git-tree-sha1 = "360f48126b5f2c2f0c833be960097f7c62705976" +uuid = "32165bc3-0280-59bc-8c0b-c33b6203efab" +version = "4.2.9+0" + [[deps.Parsers]] deps = ["Dates", "PrecompileTools", "UUIDs"] -git-tree-sha1 = "716e24b21538abc91f6205fd1d8363f39b442851" +git-tree-sha1 = "8489905bcdbcfac64d1daa51ca07c0d8f0283821" uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" -version = "2.7.2" +version = "2.8.1" [[deps.PartitionedArrays]] deps = ["CircularArrays", "Distances", "FillArrays", "IterativeSolvers", "LinearAlgebra", "MPI", "Printf", "Random", "SparseArrays", "SparseMatricesCSR"] @@ -438,7 +504,7 @@ version = "0.3.4" [[deps.Pkg]] deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" -version = "1.9.0" +version = "1.10.0" [[deps.PkgVersion]] deps = ["Pkg"] @@ -448,15 +514,15 @@ version = "0.3.3" [[deps.PrecompileTools]] deps = ["Preferences"] -git-tree-sha1 = "03b4c25b43cb84cee5c90aa9b5ea0a78fd848d2f" +git-tree-sha1 = "5aa36f7049a63a1528fe8f7c3f2113413ffd4e1f" uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a" -version = "1.2.0" +version = "1.2.1" [[deps.Preferences]] deps = ["TOML"] -git-tree-sha1 = "7eb1686b4f04b82f96ed7a4ea5890a4f0c7a09f1" +git-tree-sha1 = "9306f6085165d270f7e3db02af26a400d580f5c6" uuid = "21216c6a-2e73-6563-6e65-726566657250" -version = "1.4.0" +version = "1.4.3" [[deps.Printf]] deps = ["Unicode"] @@ -467,7 +533,7 @@ deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" [[deps.Random]] -deps = ["SHA", 
"Serialization"] +deps = ["SHA"] uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" [[deps.RecipesBase]] @@ -481,6 +547,12 @@ git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b" uuid = "189a3867-3050-52da-a836-e630ba90ab69" version = "1.2.2" +[[deps.RegistryInstances]] +deps = ["LazilyInitializedFields", "Pkg", "TOML", "Tar"] +git-tree-sha1 = "ffd19052caf598b8653b99404058fce14828be51" +uuid = "2792f1a3-b283-48e8-9a74-f99dce5104f3" +version = "0.1.0" + [[deps.Requires]] deps = ["UUIDs"] git-tree-sha1 = "838a3a4188e2ded87a4f9f184b4b0d78a1e91cb7" @@ -493,9 +565,9 @@ version = "0.7.0" [[deps.SIMD]] deps = ["PrecompileTools"] -git-tree-sha1 = "0e270732477b9e551d884e6b07e23bb2ec947790" +git-tree-sha1 = "2803cab51702db743f3fda07dd1745aadfbf43bd" uuid = "fdea26ae-647d-5447-a871-4b548cad5224" -version = "3.4.5" +version = "3.5.0" [[deps.Serialization]] uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" @@ -506,6 +578,7 @@ uuid = "6462fe0b-24de-5631-8697-dd941f90decc" [[deps.SparseArrays]] deps = ["Libdl", "LinearAlgebra", "Random", "Serialization", "SuiteSparse_jll"] uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" +version = "1.10.0" [[deps.SparseMatricesCSR]] deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] @@ -526,15 +599,19 @@ version = "2.3.1" ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" [[deps.StaticArrays]] -deps = ["LinearAlgebra", "Random", "StaticArraysCore"] -git-tree-sha1 = "51621cca8651d9e334a659443a74ce50a3b6dfab" +deps = ["LinearAlgebra", "PrecompileTools", "Random", "StaticArraysCore"] +git-tree-sha1 = "bf074c045d3d5ffd956fa0a461da38a44685d6b2" uuid = "90137ffa-7385-5640-81b9-e52037218182" -version = "1.6.3" -weakdeps = ["Statistics"] +version = "1.9.3" [deps.StaticArrays.extensions] + StaticArraysChainRulesCoreExt = "ChainRulesCore" StaticArraysStatisticsExt = "Statistics" + [deps.StaticArrays.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" + [[deps.StaticArraysCore]] git-tree-sha1 = "36b3d696ce6366023a0ea192b4cd442268995a0d" uuid = "1e83bf80-4336-4d27-bf5d-d5a4f845583c" @@ -543,7 +620,7 @@ version = "1.4.2" [[deps.Statistics]] deps = ["LinearAlgebra", "SparseArrays"] uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" -version = "1.9.0" +version = "1.10.0" [[deps.StatsAPI]] deps = ["LinearAlgebra"] @@ -556,9 +633,9 @@ deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"] uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9" [[deps.SuiteSparse_jll]] -deps = ["Artifacts", "Libdl", "Pkg", "libblastrampoline_jll"] +deps = ["Artifacts", "Libdl", "libblastrampoline_jll"] uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c" -version = "5.10.1+6" +version = "7.2.1+1" [[deps.TOML]] deps = ["Dates"] @@ -572,9 +649,9 @@ version = "1.10.0" [[deps.Tensors]] deps = ["ForwardDiff", "LinearAlgebra", "PrecompileTools", "SIMD", "StaticArrays", "Statistics"] -git-tree-sha1 = "bcbb366323add300742c9e4a5447e584640aeff2" +git-tree-sha1 = "957f256fb380cad64cae4da39e562ecfb5c3fec9" uuid = "48a634ad-e948-5137-8d70-aa71f2a747f4" -version = "1.15.0" +version = "1.16.1" [[deps.Test]] deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] @@ -587,10 +664,13 @@ uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" version = "0.5.23" [[deps.TranscodingStreams]] -deps = ["Random", "Test"] -git-tree-sha1 = "9a6ae7ed916312b41236fcef7e0af564ef934769" +git-tree-sha1 = "71509f04d045ec714c4748c785a59045c3736349" uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa" -version = "0.9.13" +version = "0.10.7" +weakdeps = ["Random", "Test"] + + 
[deps.TranscodingStreams.extensions] + TestExt = ["Test", "Random"] [[deps.UUIDs]] deps = ["Random", "SHA"] @@ -606,32 +686,44 @@ version = "1.0.1" [[deps.WriteVTK]] deps = ["Base64", "CodecZlib", "FillArrays", "LightXML", "TranscodingStreams", "VTKBase"] -git-tree-sha1 = "7b46936613e41cfe1c6a5897d243ddcab8feabec" +git-tree-sha1 = "48b9e8e9c83865e99e57f027d4edfa94e0acddae" uuid = "64499a7a-5c06-52f2-abe2-ccb03c286192" -version = "1.18.0" +version = "1.19.1" [[deps.XML2_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Zlib_jll"] -git-tree-sha1 = "04a51d15436a572301b5abbb9d099713327e9fc4" +git-tree-sha1 = "532e22cf7be8462035d092ff21fada7527e2c488" uuid = "02c8fc9c-b97f-50b9-bbe4-9be30ff0a78a" -version = "2.10.4+0" +version = "2.12.6+0" [[deps.Zlib_jll]] deps = ["Libdl"] uuid = "83775a58-1f1d-513f-b197-d71354ab007a" -version = "1.2.13+0" +version = "1.2.13+1" [[deps.libblastrampoline_jll]] deps = ["Artifacts", "Libdl"] uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" -version = "5.8.0+0" +version = "5.8.0+1" + +[[deps.libevent_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "OpenSSL_jll"] +git-tree-sha1 = "f04ec6d9a186115fb38f858f05c0c4e1b7fc9dcb" +uuid = "1080aeaf-3a6a-583e-a51c-c537b09f60ec" +version = "2.1.13+1" [[deps.nghttp2_jll]] deps = ["Artifacts", "Libdl"] uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" -version = "1.48.0+0" +version = "1.52.0+1" [[deps.p7zip_jll]] deps = ["Artifacts", "Libdl"] uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" -version = "17.4.0+0" +version = "17.4.0+2" + +[[deps.prrte_jll]] +deps = ["Artifacts", "Hwloc_jll", "JLLWrappers", "Libdl", "PMIx_jll", "libevent_jll"] +git-tree-sha1 = "5adb2d7a18a30280feb66cad6f1a1dfdca2dc7b0" +uuid = "eb928a42-fffd-568d-ab9c-3f5d54fc65b9" +version = "3.0.2+0" diff --git a/docs/Project.toml b/docs/Project.toml index 1d95528..d7a7e05 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -1,10 +1,12 @@ [deps] BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" +Ferrite = "c061ca5d-56c9-439f-9c0e-210fe06d3992" FerriteDistributed = "570c3397-5fe4-4792-be0d-48dbf0d14605" HYPRE = "b5ffcf37-a2bd-41ab-a3da-4bd9bc8ad771" IterativeSolvers = "42fd0dbc-a981-5370-80f2-aaf504508153" Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306" +MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" MPIPreferences = "3da0fdf6-3ccc-4f1b-acd9-58baa6c99267" Metis = "2679e427-3c69-5b7f-982b-ece356f1e94b" PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9" diff --git a/docs/make.jl b/docs/make.jl index 7e4fb04..6893c98 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -25,8 +25,8 @@ makedocs( sitename = "FerriteDistributed.jl", format = Documenter.HTML(), doctest = false, - strict = false, - draft = true, + warnonly = true, + draft = liveserver, pages = Any[ "Home" => "index.md", "Examples" => [GENERATEDEXAMPLES;], diff --git a/docs/src/literate/heat_equation_hypre.jl b/docs/src/literate/heat_equation_hypre.jl index 3f79a04..935b249 100644 --- a/docs/src/literate/heat_equation_hypre.jl +++ b/docs/src/literate/heat_equation_hypre.jl @@ -22,10 +22,12 @@ import FerriteDistributed: getglobalgrid, global_comm, local_dof_range #TODO REM # Launch MPI and HYPRE MPI.Init() HYPRE.Init() +#md nothing # hide # We start generating a simple grid with 10x10x10 hexahedral elements # and distribute it across our processors using `generate_distributed_grid`. 
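Both literate examples touched below are meant to be started under MPI, with one Julia process per rank. A minimal launch sketch, not part of this changeset, using the `mpiexec` helper from MPI.jl; the rank count `-n 4`, the `--project` flag, and the script path are illustrative assumptions to adjust to the local checkout.

```julia
using MPI

# Launch the HYPRE heat equation example on four MPI ranks.
MPI.mpiexec() do cmd
    run(`$cmd -n 4 $(Base.julia_cmd()) --project=docs docs/src/literate/heat_equation_hypre.jl`)
end
```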
-dgrid = generate_nod_grid(MPI.COMM_WORLD, Hexahedron, (10, 10, 10); partitioning_alg=FerriteDistributed.PartitioningAlgorithm.Metis(:RECURSIVE)); +dgrid = generate_nod_grid(MPI.COMM_WORLD, Hexahedron, (10, 10, 10); partitioning_alg=FerriteDistributed.PartitioningAlgorithm.Metis(:RECURSIVE)) +#md nothing # hide # ### Trial and test functions # Nothing changes here. @@ -33,13 +35,15 @@ ref = RefHexahedron ip = Lagrange{ref, 2}() ip_geo = Lagrange{ref, 1}() qr = QuadratureRule{ref}(3) -cellvalues = CellValues(qr, ip, ip_geo); +cellvalues = CellValues(qr, ip, ip_geo) +#md nothing # hide # ### Degrees of freedom # Nothing changes here, too. The constructor takes care of creating the correct distributed dof handler. dh = DofHandler(dgrid) push!(dh, :u, 1, ip) -close!(dh); +close!(dh) +#md nothing # hide # ### Boundary conditions # Nothing has to be changed here either. @@ -48,65 +52,67 @@ ch = ConstraintHandler(dh); dbc = Dirichlet(:u, ∂Ω, (x, t) -> 0) dbc_val = 0 #src dbc = Dirichlet(:u, ∂Ω, (x, t) -> dbc_val) #src -add!(ch, dbc); +add!(ch, dbc) close!(ch) +#md nothing # hide # ### Assembling the linear system -# Assembling the system works also mostly analogue. -function doassemble(cellvalues::CellValues, dh::FerriteDistributed.NODDofHandler{dim}, ch::ConstraintHandler) where {dim} +# Assembling the system works also mostly analogue. We use the same function to assemble the element as in the serial version. +function assemble_element!(Ke, fe, cellvalues, cell_coords::AbstractVector{<:Vec{dim}}) where dim + fill!(Ke, 0) + fill!(fe, 0) + n_basefuncs = getnbasefunctions(cellvalues) - Ke = zeros(n_basefuncs, n_basefuncs) - fe = zeros(n_basefuncs) - - # --------------------- Distributed assembly -------------------- - # The synchronization with the global sparse matrix is handled by - # an assembler again. You can choose from different backends, which - # are described in the docs and will be expaned over time. This call - # may trigger a large amount of communication. + for q_point in 1:getnquadpoints(cellvalues) + dΩ = getdetJdV(cellvalues, q_point) + + for i in 1:n_basefuncs + v = shape_value(cellvalues, q_point, i) + ∇v = shape_gradient(cellvalues, q_point, i) + ## Manufactured solution of Π cos(xᵢπ) + x = spatial_coordinate(cellvalues, q_point, cell_coords) + fe[i] += (π/2)^2 * dim * prod(cos, x*π/2) * v * dΩ + + for j in 1:n_basefuncs + ∇u = shape_gradient(cellvalues, q_point, j) + Ke[i, j] += (∇v ⋅ ∇u) * dΩ + end + end + end +end +#md nothing # hide - # TODO how to put this into an interface? +# The synchronization with the global sparse matrix is handled by +# an assembler again. You can choose from different backends, which +# are described in the docs and will be expaned over time. This call +# may trigger a large amount of communication. +function doassemble(cellvalues::CellValues, dh::FerriteDistributed.NODDofHandler, ch::ConstraintHandler) + ## TODO how to put this into an interface? 
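    ## A note on the calls below (editorial sketch of the intent, based on HYPRE.jl's
    ## IJ interface): every rank owns one contiguous block of global rows, so
    ## `local_dof_range(dh)` is taken to be this rank's block of global dof indices, and
    ## `HYPREMatrix(comm, ilower, iupper)` / `HYPREVector(comm, ilower, iupper)` allocate
    ## the distributed matrix and right-hand side for exactly that row range.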
dgrid = getglobalgrid(dh) comm = global_comm(dgrid) ldofrange = local_dof_range(dh) K = HYPREMatrix(comm, first(ldofrange), last(ldofrange)) f = HYPREVector(comm, first(ldofrange), last(ldofrange)) - assembler = start_assemble(K, f) - # For the local assembly nothing changes - for cell in CellIterator(dh) - fill!(Ke, 0) - fill!(fe, 0) + n_basefuncs = getnbasefunctions(cellvalues) + Ke = zeros(n_basefuncs, n_basefuncs) + fe = zeros(n_basefuncs) + ## For the local assembly nothing changes + for cell in CellIterator(dh) reinit!(cellvalues, cell) - coords = getcoordinates(cell) - - for q_point in 1:getnquadpoints(cellvalues) - dΩ = getdetJdV(cellvalues, q_point) - - for i in 1:n_basefuncs - v = shape_value(cellvalues, q_point, i) - ∇v = shape_gradient(cellvalues, q_point, i) - # Manufactured solution of Π cos(xᵢπ) - x = spatial_coordinate(cellvalues, q_point, coords) - fe[i] += (π/2)^2 * dim * prod(cos, x*π/2) * v * dΩ - - for j in 1:n_basefuncs - ∇u = shape_gradient(cellvalues, q_point, j) - Ke[i, j] += (∇v ⋅ ∇u) * dΩ - end - end - end - + assemble_element!(Ke, fe, cellvalues, getcoordinates(cell)) + ## Local elimination of boundary conditions, because global + ## elimination is not implemented for the HYPRE extension. apply_local!(Ke, fe, celldofs(cell), ch) - - # TODO how to put this into an interface. + ## TODO how to put this into an interface? assemble!(assembler, dh.ldof_to_gdof[celldofs(cell)], fe, Ke) end - # Finally, for the `HYPREAssembler` we have to call - # `end_assemble` to construct the global sparse matrix and the global - # right hand side vector. + ## Finally, for the `HYPREAssembler` we have to call + ## `end_assemble` to construct the global sparse matrix and the global + ## right hand side vector. end_assemble(assembler) return K, f @@ -115,30 +121,34 @@ end # ### Solution of the system # Again, we assemble our problem. Note that we applied the constraints locally. -K, f = doassemble(cellvalues, dh, ch); +K, f = doassemble(cellvalues, dh, ch) +#md nothing # hide # We use CG with AMG preconditioner to solve the system. precond = HYPRE.BoomerAMG() solver = HYPRE.PCG(; Precond = precond) uₕ = HYPRE.solve(solver, K, f) +#md nothing # hide # And convert the solution from HYPRE to Ferrite u_local = Vector{Float64}(undef, FerriteDistributed.num_local_dofs(dh)) FerriteDistributed.extract_local_part!(u_local, uₕ, dh) +#md nothing # hide # ### Exporting via PVTK # To visualize the result we export the grid and our field `u` # to a VTK-file, which can be viewed in e.g. [ParaView](https://www.paraview.org/). +# For debugging purposes it can be helpful to enrich +# the visualization with some meta information about +# the grid and its partitioning. 
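For reference, an editorial derivation that is not part of the diff: both examples assemble the load for the manufactured solution u(x) = ∏ᵢ cos(π xᵢ / 2). Each coordinate direction contributes ∂²u/∂xᵢ² = -(π/2)² u, so -Δu = dim ⋅ (π/2)² ⋅ u, which is precisely the factor `(π/2)^2 * dim * prod(cos, x*π/2)` added to `fe[i]` in `assemble_element!` above and compared against in the `#src` test block at the end of each script.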
vtk_grid("heat_equation_distributed", dh) do vtk vtk_point_data(vtk, dh, u_local) - # For debugging purposes it can be helpful to enrich - # the visualization with some meta information about - # the grid and its partitioning vtk_shared_vertices(vtk, dgrid) vtk_shared_faces(vtk, dgrid) vtk_shared_edges(vtk, dgrid) #src vtk_partitioning(vtk, dgrid) end +#md nothing # hide ## Test the result against the manufactured solution #src using Test #src diff --git a/docs/src/literate/heat_equation_pa.jl b/docs/src/literate/heat_equation_pa.jl index ca31a60..3469e17 100644 --- a/docs/src/literate/heat_equation_pa.jl +++ b/docs/src/literate/heat_equation_pa.jl @@ -20,10 +20,12 @@ using IterativeSolvers # Launch MPI MPI.Init() +#md nothing # hide # We start generating a simple grid with 20x20 quadrilateral elements # and distribute it across our processors using `generate_distributed_grid`. -dgrid = generate_nod_grid(MPI.COMM_WORLD, Hexahedron, (10, 10, 10); partitioning_alg=FerriteDistributed.PartitioningAlgorithm.Metis(:RECURSIVE)); +dgrid = generate_nod_grid(MPI.COMM_WORLD, Hexahedron, (10, 10, 10); partitioning_alg=FerriteDistributed.PartitioningAlgorithm.Metis(:RECURSIVE)) +#md nothing # hide # ### Trial and test functions # Nothing changes here. @@ -31,13 +33,15 @@ ref = RefHexahedron ip = Lagrange{ref, 2}() ip_geo = Lagrange{ref, 1}() qr = QuadratureRule{ref}(3) -cellvalues = CellValues(qr, ip, ip_geo); +cellvalues = CellValues(qr, ip, ip_geo) +#md nothing # hide # ### Degrees of freedom # Nothing changes here, too. The constructor takes care of creating the correct distributed dof handler. dh = DofHandler(dgrid) add!(dh, :u, ip) -close!(dh); +close!(dh) +#md nothing # hide # ### Boundary conditions # Nothing has to be changed here either. @@ -46,86 +50,93 @@ ch = ConstraintHandler(dh); dbc = Dirichlet(:u, ∂Ω, (x, t) -> 0) dbc_val = 0 #src dbc = Dirichlet(:u, ∂Ω, (x, t) -> dbc_val) #src -add!(ch, dbc); +add!(ch, dbc) close!(ch) -update!(ch, 0.0); +#md nothing # hide # ### Assembling the linear system # Assembling the system works also mostly analogue. Note that the dof handler type changed. -function doassemble(cellvalues::CellValues, dh::FerriteDistributed.NODDofHandler{dim}) where {dim} +# We use the same function to assemble the element as in the serial version. +function assemble_element!(Ke, fe, cellvalues, cell_coords::AbstractVector{<:Vec{dim}}) where dim + fill!(Ke, 0) + fill!(fe, 0) + n_basefuncs = getnbasefunctions(cellvalues) - Ke = zeros(n_basefuncs, n_basefuncs) - fe = zeros(n_basefuncs) + for q_point in 1:getnquadpoints(cellvalues) + dΩ = getdetJdV(cellvalues, q_point) + + for i in 1:n_basefuncs + v = shape_value(cellvalues, q_point, i) + ∇v = shape_gradient(cellvalues, q_point, i) + ## Manufactured solution of Π cos(xᵢπ) + x = spatial_coordinate(cellvalues, q_point, cell_coords) + fe[i] += (π/2)^2 * dim * prod(cos, x*π/2) * v * dΩ + + for j in 1:n_basefuncs + ∇u = shape_gradient(cellvalues, q_point, j) + Ke[i, j] += (∇v ⋅ ∇u) * dΩ + end + end + end +end +#md nothing # hide - # --------------------- Distributed assembly -------------------- - # The synchronization with the global sparse matrix is handled by - # an assembler again. You can choose from different backends, which - # are described in the docs and will be expaned over time. This call - # may trigger a large amount of communication. - # NOTE: At the time of writing the only backend available is a COO - # assembly via PartitionedArrays.jl . 
+# The synchronization with the global sparse matrix is handled by +# an assembler again. You can choose from different backends, which +# are described in the docs and will be expaned over time. This call +# may trigger a large amount of communication. +# NOTE: At the time of writing the only backend available is a COO +# assembly via PartitionedArrays.jl. +function doassemble(cellvalues::CellValues, dh::FerriteDistributed.NODDofHandler{dim}) where {dim} assembler = start_assemble(dh, distribute_with_mpi(LinearIndices((MPI.Comm_size(MPI.COMM_WORLD),)))) - # For the local assembly nothing changes + ## For the local assembly nothing changes + n_basefuncs = getnbasefunctions(cellvalues) + Ke = zeros(n_basefuncs, n_basefuncs) + fe = zeros(n_basefuncs) for cell in CellIterator(dh) fill!(Ke, 0) fill!(fe, 0) - reinit!(cellvalues, cell) - coords = getcoordinates(cell) - - for q_point in 1:getnquadpoints(cellvalues) - dΩ = getdetJdV(cellvalues, q_point) - - for i in 1:n_basefuncs - v = shape_value(cellvalues, q_point, i) - ∇v = shape_gradient(cellvalues, q_point, i) - # Manufactured solution of Π cos(xᵢπ) - x = spatial_coordinate(cellvalues, q_point, coords) - fe[i] += (π/2)^2 * dim * prod(cos, x*π/2) * v * dΩ - - for j in 1:n_basefuncs - ∇u = shape_gradient(cellvalues, q_point, j) - Ke[i, j] += (∇v ⋅ ∇u) * dΩ - end - end - end - - # Note that this call should be communication-free! + assemble_element!(Ke, fe, cellvalues, getcoordinates(cell)) + ## Note that this call should be communication-free! Ferrite.assemble!(assembler, celldofs(cell), fe, Ke) end - # Finally, for the `PartitionedArraysCOOAssembler` we have to call - # `end_assemble` to construct the global sparse matrix and the global - # right hand side vector. + ## Finally, for the `PartitionedArraysCOOAssembler` we have to call + ## `end_assemble` to construct the global sparse matrix and the global + ## right hand side vector. return end_assemble(assembler) end #md nothing # hide # ### Solution of the system # Again, we assemble our problem and apply the constraints as needed. -K, f = doassemble(cellvalues, dh); +K, f = doassemble(cellvalues, dh) apply!(K, f, ch) +#md nothing # hide # To compute the solution we utilize conjugate gradients because at the time of writing # this is the only available scalable working solver. # Additional note: At the moment of writing this we have no good preconditioners for PSparseMatrix in Julia, # partly due to unimplemented multiplication operators for the matrix data type. u = cg(K, f) +#md nothing # hide # ### Exporting via PVTK # To visualize the result we export the grid and our field `u` # to a VTK-file, which can be viewed in e.g. [ParaView](https://www.paraview.org/). +# For debugging purposes it can be helpful to enrich +# the visualization with some meta information about +# the grid and its partitioning. 
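If convergence of the plain CG solve needs to be monitored, IterativeSolvers.jl can also return its convergence history. A minimal sketch, not part of this changeset: the tolerance value is arbitrary, the `reltol`/`log` keywords follow IterativeSolvers.jl's documented interface, and it is assumed that the PartitionedArrays types take the same code path as the `cg(K, f)` call above.

```julia
using IterativeSolvers

# Solve and additionally request the convergence history.
u, history = cg(K, f; reltol = 1e-8, log = true)
history.isconverged || @warn "CG did not converge" history
```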
vtk_grid("heat_equation_distributed", dh) do vtk vtk_point_data(vtk, dh, u) - # For debugging purposes it can be helpful to enrich - # the visualization with some meta information about - # the grid and its partitioning vtk_shared_vertices(vtk, dgrid) vtk_shared_faces(vtk, dgrid) vtk_shared_edges(vtk, dgrid) #src vtk_partitioning(vtk, dgrid) end +#md nothing # hide ## Test the result against the manufactured solution #src using Test #src @@ -133,7 +144,7 @@ for cell in CellIterator(dh) #src reinit!(cellvalues, cell) #src n_basefuncs = getnbasefunctions(cellvalues) #src coords = getcoordinates(cell) #src - map(local_values(u)) do u_local #src + map(local_values(u)) do u_local #src uₑ = u_local[celldofs(cell)] #src for q_point in 1:getnquadpoints(cellvalues) #src x = spatial_coordinate(cellvalues, q_point, coords) #src diff --git a/ext/FerriteDistributedHYPREAssembly.jl b/ext/FerriteDistributedHYPREAssembly.jl index bbfb654..ca39cdb 100644 --- a/ext/FerriteDistributedHYPREAssembly.jl +++ b/ext/FerriteDistributedHYPREAssembly.jl @@ -5,7 +5,7 @@ module FerriteDistributedHYPREAssembly using FerriteDistributed # TODO remove me. These are merely hotfixes to split the extensions trasiently via an internal API. -import FerriteDistributed: getglobalgrid, num_local_true_dofs, num_local_dofs, global_comm, interface_comm, global_rank, compute_owner, remote_entities, num_fields +import FerriteDistributed: getglobalgrid, num_local_true_dofs, num_local_dofs, global_comm, interface_comm, global_rank, compute_owner, remote_entities, num_fields, local_entities using MPI using HYPRE using Base: @propagate_inbounds diff --git a/ext/FerriteDistributedHYPREAssembly/conversion.jl b/ext/FerriteDistributedHYPREAssembly/conversion.jl index 7c0c244..e571caf 100644 --- a/ext/FerriteDistributedHYPREAssembly/conversion.jl +++ b/ext/FerriteDistributedHYPREAssembly/conversion.jl @@ -27,16 +27,17 @@ function FerriteDistributed.extract_local_part!(u::Vector{T}, uh::HYPREVector, d # TODO speed this up and better API dgrid = getglobalgrid(dh) for sv ∈ get_shared_vertices(dgrid) - lvi = sv.local_idx my_rank != compute_owner(dgrid, sv) && continue - for field_idx in 1:num_fields(dh) - if has_vertex_dofs(dh, field_idx, lvi) - local_dofs = vertex_dofs(dh, field_idx, lvi) - global_dofs = dh.ldof_to_gdof[local_dofs] - for receiver_rank ∈ keys(remote_entities(sv)) - for i ∈ 1:length(global_dofs) - # Note that u already has the correct values for all locally owned dofs due to the loop above! - gdof_value_send[receiver_rank][global_dofs[i]] = u[local_dofs[i]] + for lvi ∈ local_entities(sv) + for field_idx in 1:num_fields(dh) + if has_vertex_dofs(dh, field_idx, lvi) + local_dofs = vertex_dofs(dh, field_idx, lvi) + global_dofs = dh.ldof_to_gdof[local_dofs] + for receiver_rank ∈ keys(remote_entities(sv)) + for i ∈ 1:length(global_dofs) + # Note that u already has the correct values for all locally owned dofs due to the loop above! 
+ gdof_value_send[receiver_rank][global_dofs[i]] = u[local_dofs[i]] + end end end end @@ -44,16 +45,17 @@ function FerriteDistributed.extract_local_part!(u::Vector{T}, uh::HYPREVector, d end for se ∈ get_shared_edges(dgrid) - lei = se.local_idx my_rank != compute_owner(dgrid, se) && continue - for field_idx in 1:num_fields(dh) - if has_edge_dofs(dh, field_idx, lei) - local_dofs = edge_dofs(dh, field_idx, lei) - global_dofs = dh.ldof_to_gdof[local_dofs] - for receiver_rank ∈ keys(remote_entities(se)) - for i ∈ 1:length(global_dofs) - # Note that u already has the correct values for all locally owned dofs due to the loop above! - gdof_value_send[receiver_rank][global_dofs[i]] = u[local_dofs[i]] + for lei ∈ local_entities(se) + for field_idx in 1:num_fields(dh) + if has_edge_dofs(dh, field_idx, lei) + local_dofs = edge_dofs(dh, field_idx, lei) + global_dofs = dh.ldof_to_gdof[local_dofs] + for receiver_rank ∈ keys(remote_entities(se)) + for i ∈ 1:length(global_dofs) + # Note that u already has the correct values for all locally owned dofs due to the loop above! + gdof_value_send[receiver_rank][global_dofs[i]] = u[local_dofs[i]] + end end end end @@ -61,16 +63,17 @@ function FerriteDistributed.extract_local_part!(u::Vector{T}, uh::HYPREVector, d end for sf ∈ get_shared_faces(dgrid) - lfi = sf.local_idx my_rank != compute_owner(dgrid, sf) && continue - for field_idx in 1:num_fields(dh) - if has_face_dofs(dh, field_idx, lfi) - local_dofs = face_dofs(dh, field_idx, lfi) - global_dofs = dh.ldof_to_gdof[local_dofs] - for receiver_rank ∈ keys(remote_entities(sf)) - for i ∈ 1:length(global_dofs) - # Note that u already has the correct values for all locally owned dofs due to the loop above! - gdof_value_send[receiver_rank][global_dofs[i]] = u[local_dofs[i]] + for lfi ∈ local_entities(sf) + for field_idx in 1:num_fields(dh) + if has_face_dofs(dh, field_idx, lfi) + local_dofs = face_dofs(dh, field_idx, lfi) + global_dofs = dh.ldof_to_gdof[local_dofs] + for receiver_rank ∈ keys(remote_entities(sf)) + for i ∈ 1:length(global_dofs) + # Note that u already has the correct values for all locally owned dofs due to the loop above! + gdof_value_send[receiver_rank][global_dofs[i]] = u[local_dofs[i]] + end end end end diff --git a/ext/FerriteDistributedPartitionedArrays.jl b/ext/FerriteDistributedPartitionedArrays.jl index d083683..0bcd5b4 100644 --- a/ext/FerriteDistributedPartitionedArrays.jl +++ b/ext/FerriteDistributedPartitionedArrays.jl @@ -6,7 +6,7 @@ module FerriteDistributedPartitionedArrays using FerriteDistributed # TODO remove me. These are merely hotfixes to split the extensions trasiently via an internal API. 
import FerriteDistributed: getglobalgrid, num_global_dofs, num_local_true_dofs, num_local_dofs, global_comm, interface_comm, global_rank, compute_owner, remote_entities, - num_fields, getfieldnames, getdim + num_fields, getfieldnames, getdim, local_entities using MPI using PartitionedArrays using Base: @propagate_inbounds diff --git a/ext/FerriteDistributedPartitionedArrays/assembler.jl b/ext/FerriteDistributedPartitionedArrays/assembler.jl index b06889f..979e94b 100644 --- a/ext/FerriteDistributedPartitionedArrays/assembler.jl +++ b/ext/FerriteDistributedPartitionedArrays/assembler.jl @@ -106,66 +106,31 @@ struct COOAssembler{T} ghost_dof_field_index_to_send = [Int[] for i ∈ 1:destination_len] ghost_dof_owner = [Int[] for i ∈ 1:destination_len] # corresponding owner ghost_dof_pivot_to_send = [Int[] for i ∈ 1:destination_len] # corresponding dof to interact with - for (pivot_vertex, pivot_shared_vertex) ∈ dgrid.shared_vertices + for pivot_shared_vertex ∈ get_shared_vertices(dgrid) # Start by searching shared entities which are not owned pivot_vertex_owner_rank = compute_owner(dgrid, pivot_shared_vertex) - pivot_cell_idx = pivot_vertex[1] - - if my_rank != pivot_vertex_owner_rank - sender_slot = destination_index[pivot_vertex_owner_rank] - - Ferrite.@debug println("$pivot_vertex may require synchronization (R$my_rank)") - # Note: We have to send ALL dofs on the element to the remote. - cell_dofs_upper_bound = (pivot_cell_idx == getncells(dh.grid)) ? length(dh.cell_dofs) : dh.cell_dofs_offset[pivot_cell_idx+1] - cell_dofs = dh.cell_dofs[dh.cell_dofs_offset[pivot_cell_idx]:cell_dofs_upper_bound] - - for (field_idx, field_name) in zip(1:num_fields(dh), getfieldnames(dh)) - !has_vertex_dofs(dh, field_idx, pivot_vertex) && continue - pivot_vertex_dofs = vertex_dofs(dh, field_idx, pivot_vertex) - - for d ∈ 1:dh.field_dims[field_idx] - Ferrite.@debug println(" adding dof $(pivot_vertex_dofs[d]) to ghost sync synchronization on slot $sender_slot (R$my_rank)") - - # Extract dofs belonging to the current field - #cell_field_dofs = cell_dofs[dof_range(dh, field_name)] - #for cell_field_dof ∈ cell_field_dofs - for cell_dof ∈ cell_dofs - append!(ghost_dof_pivot_to_send[sender_slot], ldof_to_gdof[pivot_vertex_dofs[d]]) - append!(ghost_dof_to_send[sender_slot], ldof_to_gdof[cell_dof]) - append!(ghost_rank_to_send[sender_slot], ldof_to_rank[cell_dof]) - append!(ghost_dof_field_index_to_send[sender_slot], field_idx) - end - end - end - end - end - - if dim > 1 - for (pivot_face, pivot_shared_face) ∈ dgrid.shared_faces - # Start by searching shared entities which are not owned - pivot_face_owner_rank = compute_owner(dgrid, pivot_shared_face) - pivot_cell_idx = pivot_face[1] + for pivot_vi ∈ local_entities(pivot_shared_vertex) + pivot_cell_idx = pivot_vi[1] + if my_rank != pivot_vertex_owner_rank + sender_slot = destination_index[pivot_vertex_owner_rank] - if my_rank != pivot_face_owner_rank - sender_slot = destination_index[pivot_face_owner_rank] - - Ferrite.@debug println("$pivot_face may require synchronization (R$my_rank)") + Ferrite.@debug println("$pivot_shared_vertex may require synchronization (R$my_rank)") # Note: We have to send ALL dofs on the element to the remote. cell_dofs_upper_bound = (pivot_cell_idx == getncells(dh.grid)) ? 
length(dh.cell_dofs) : dh.cell_dofs_offset[pivot_cell_idx+1] cell_dofs = dh.cell_dofs[dh.cell_dofs_offset[pivot_cell_idx]:cell_dofs_upper_bound] for (field_idx, field_name) in zip(1:num_fields(dh), getfieldnames(dh)) - !has_face_dofs(dh, field_idx, pivot_face) && continue - pivot_face_dofs = face_dofs(dh, field_idx, pivot_face) + !has_vertex_dofs(dh, field_idx, pivot_vi) && continue + pivot_vertex_dofs = vertex_dofs(dh, field_idx, pivot_vi) for d ∈ 1:dh.field_dims[field_idx] - Ferrite.@debug println(" adding dof $(pivot_face_dofs[d]) to ghost sync synchronization on slot $sender_slot (R$my_rank)") + Ferrite.@debug println(" adding dof $(pivot_vertex_dofs[d]) to ghost sync synchronization on slot $sender_slot (R$my_rank)") # Extract dofs belonging to the current field #cell_field_dofs = cell_dofs[dof_range(dh, field_name)] #for cell_field_dof ∈ cell_field_dofs for cell_dof ∈ cell_dofs - append!(ghost_dof_pivot_to_send[sender_slot], ldof_to_gdof[pivot_face_dofs[d]]) + append!(ghost_dof_pivot_to_send[sender_slot], ldof_to_gdof[pivot_vertex_dofs[d]]) append!(ghost_dof_to_send[sender_slot], ldof_to_gdof[cell_dof]) append!(ghost_rank_to_send[sender_slot], ldof_to_rank[cell_dof]) append!(ghost_dof_field_index_to_send[sender_slot], field_idx) @@ -176,31 +141,33 @@ struct COOAssembler{T} end end - if dim > 2 - for (pivot_edge, pivot_shared_edge) ∈ dgrid.shared_edges + if dim > 1 + for pivot_shared_face ∈ get_shared_faces(dgrid) # Start by searching shared entities which are not owned - pivot_edge_owner_rank = compute_owner(dgrid, pivot_shared_edge) - pivot_cell_idx = pivot_edge[1] + pivot_face_owner_rank = compute_owner(dgrid, pivot_shared_face) + pivot_fi = local_entities(pivot_shared_face)[1] + pivot_cell_idx = pivot_fi[1] - if my_rank != pivot_edge_owner_rank - sender_slot = destination_index[pivot_edge_owner_rank] + if my_rank != pivot_face_owner_rank + sender_slot = destination_index[pivot_face_owner_rank] - Ferrite.@debug println("$pivot_edge may require synchronization (R$my_rank)") + Ferrite.@debug println("$pivot_shared_face may require synchronization (R$my_rank)") # Note: We have to send ALL dofs on the element to the remote. cell_dofs_upper_bound = (pivot_cell_idx == getncells(dh.grid)) ? 
length(dh.cell_dofs) : dh.cell_dofs_offset[pivot_cell_idx+1] cell_dofs = dh.cell_dofs[dh.cell_dofs_offset[pivot_cell_idx]:cell_dofs_upper_bound] for (field_idx, field_name) in zip(1:num_fields(dh), getfieldnames(dh)) - !has_edge_dofs(dh, field_idx, pivot_edge) && continue - pivot_edge_dofs = edge_dofs(dh, field_idx, pivot_edge) + !has_face_dofs(dh, field_idx, pivot_fi) && continue + pivot_face_dofs = face_dofs(dh, field_idx, pivot_fi) for d ∈ 1:dh.field_dims[field_idx] - Ferrite.@debug println(" adding dof $(pivot_edge_dofs[d]) to ghost sync synchronization on slot $sender_slot (R$my_rank)") + Ferrite.@debug println(" adding dof $(pivot_face_dofs[d]) to ghost sync synchronization on slot $sender_slot (R$my_rank)") + # Extract dofs belonging to the current field #cell_field_dofs = cell_dofs[dof_range(dh, field_name)] #for cell_field_dof ∈ cell_field_dofs for cell_dof ∈ cell_dofs - append!(ghost_dof_pivot_to_send[sender_slot], ldof_to_gdof[pivot_edge_dofs[d]]) + append!(ghost_dof_pivot_to_send[sender_slot], ldof_to_gdof[pivot_face_dofs[d]]) append!(ghost_dof_to_send[sender_slot], ldof_to_gdof[cell_dof]) append!(ghost_rank_to_send[sender_slot], ldof_to_rank[cell_dof]) append!(ghost_dof_field_index_to_send[sender_slot], field_idx) @@ -211,6 +178,43 @@ struct COOAssembler{T} end end + if dim > 2 + for pivot_shared_edge ∈ get_shared_edges(dgrid) + # Start by searching shared entities which are not owned + pivot_edge_owner_rank = compute_owner(dgrid, pivot_shared_edge) + for pivot_ei ∈ local_entities(pivot_shared_edge) + pivot_cell_idx = pivot_ei[1] + + if my_rank != pivot_edge_owner_rank + sender_slot = destination_index[pivot_edge_owner_rank] + + Ferrite.@debug println("$pivot_shared_edge may require synchronization (R$my_rank)") + # Note: We have to send ALL dofs on the element to the remote. + cell_dofs_upper_bound = (pivot_cell_idx == getncells(dh.grid)) ? 
length(dh.cell_dofs) : dh.cell_dofs_offset[pivot_cell_idx+1] + cell_dofs = dh.cell_dofs[dh.cell_dofs_offset[pivot_cell_idx]:cell_dofs_upper_bound] + + for (field_idx, field_name) in zip(1:num_fields(dh), getfieldnames(dh)) + !has_edge_dofs(dh, field_idx, pivot_ei) && continue + pivot_edge_dofs = edge_dofs(dh, field_idx, pivot_ei) + + for d ∈ 1:dh.field_dims[field_idx] + Ferrite.@debug println(" adding dof $(pivot_edge_dofs[d]) to ghost sync synchronization on slot $sender_slot (R$my_rank)") + # Extract dofs belonging to the current field + #cell_field_dofs = cell_dofs[dof_range(dh, field_name)] + #for cell_field_dof ∈ cell_field_dofs + for cell_dof ∈ cell_dofs + append!(ghost_dof_pivot_to_send[sender_slot], ldof_to_gdof[pivot_edge_dofs[d]]) + append!(ghost_dof_to_send[sender_slot], ldof_to_gdof[cell_dof]) + append!(ghost_rank_to_send[sender_slot], ldof_to_rank[cell_dof]) + append!(ghost_dof_field_index_to_send[sender_slot], field_idx) + end + end + end + end + end + end + end + ghost_send_buffer_lengths = Int[length(i) for i ∈ ghost_dof_to_send] ghost_recv_buffer_lengths = zeros(Int, destination_len) MPI.Neighbor_alltoall!(UBuffer(ghost_send_buffer_lengths,1), UBuffer(ghost_recv_buffer_lengths,1), interface_comm(dgrid)); diff --git a/ext/FerriteDistributedPartitionedArrays/constraints.jl b/ext/FerriteDistributedPartitionedArrays/constraints.jl index b3abeca..0f5aeea 100644 --- a/ext/FerriteDistributedPartitionedArrays/constraints.jl +++ b/ext/FerriteDistributedPartitionedArrays/constraints.jl @@ -66,11 +66,8 @@ function Ferrite.apply!(K::PartitionedArrays.PSparseMatrix, f::PartitionedArrays # remote_ghost_gdofs, remote_ghost_parts = map(K.col_partition) do partition partition = K.col_partition.item_ref[] remote_ghost_ldofs = partition.ghost_to_local - @show remote_ghost_ldofs remote_ghost_parts = partition.local_to_owner[remote_ghost_ldofs] - @show remote_ghost_parts remote_ghost_gdofs = partition.local_to_global[remote_ghost_ldofs] - @show remote_ghost_gdofs # return (remote_ghost_gdofs, remote_ghost_parts) # end diff --git a/src/Entity.jl b/src/Entity.jl new file mode 100644 index 0000000..84758c8 --- /dev/null +++ b/src/Entity.jl @@ -0,0 +1,88 @@ +""" + VertexRepresentation + +We can identify a vertex uniquely by the node number. +""" +struct VertexRepresentation + node::Int +end + +""" + EdgeRepresentation + +We can identify an edge uniquely by the sorted node numbers associated with the end points. +""" +struct EdgeRepresentation + a::Int + b::Int +end + +function EdgeRepresentation(ab::Tuple{Int,Int}) + se = Ferrite.sortedge_fast(ab) + return EdgeRepresentation(se[1], se[2]) +end + +""" + FaceRepresentation + +We can identify a face uniquely by 3 sorted node numbers associated with the vertices. +3 points are sufficient, because 3 (non-aligned) points can uniquely describe a surface in 3D. +""" +struct FaceRepresentation + a::Int + b::Int + c::Int +end + + +function FaceRepresentation(ab::Tuple{Int,Int}) + @warn "Fixme after https://github.com/Ferrite-FEM/Ferrite.jl/pull/789" maxlog=1 + sf = Ferrite.sortface_fast(ab) + return FaceRepresentation(-1, sf[1], sf[2]) +end + +function FaceRepresentation(abc::Union{Tuple{Int,Int,Int}, Tuple{Int,Int,Int,Int}}) + sf = Ferrite.sortface_fast(abc) + return FaceRepresentation(sf[1], sf[2], sf[3]) +end + + +""" + Entity + +Supertype for geometric entities. +""" +abstract type Entity end + +# !!!THE STUFF BELOW IS CURRENTLY UNUSED!!! 
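# A small usage sketch (not part of the new file): because the constructors above sort
# the node numbers via `Ferrite.sortedge_fast` / `Ferrite.sortface_fast`, two cells that
# refer to the same entity through differently ordered node tuples produce the same key,
# which is what makes these representation types usable as Dict keys for the shared
# entities in NODGrid.jl further down in this diff.
@assert EdgeRepresentation((7, 3)) == EdgeRepresentation((3, 7))        # same edge, same key
@assert FaceRepresentation((9, 2, 5)) == FaceRepresentation((5, 9, 2))  # same triangular face, same key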
+ +""" + Vertex <: Entity + +A shared vertex induced by a local vertex index and all remote vertex indices on all remote ranks. +""" +struct Vertex <: Entity + unique_local_representation::VertexRepresentation # Identify via node in grid + local_vertices::Vector{VertexIndex} +end + +""" + Face <: Entity + +A face induced by a local face index and all remote face indices on all remote ranks. +""" +struct Face <: Entity + unique_representation::FaceRepresentation + local_faces::Pair{FaceIndex,FaceIndex} +end + +""" + Edge <: Entity + +An edge induced by a local edge index and all remote edge indices on all remote ranks. +""" +struct Edge <: Entity + unique_representation::EdgeRepresentation # Identify via node in grid + local_edges::Vector{EdgeIndex} +end + diff --git a/src/FerriteDistributed.jl b/src/FerriteDistributed.jl index 2a0d6fd..cbaad0c 100644 --- a/src/FerriteDistributed.jl +++ b/src/FerriteDistributed.jl @@ -8,7 +8,7 @@ import Ferrite: get_coordinate_eltype, ScalarWrapper, @debug, nnodes_per_cell, n_components, get_grid, getdim, BoundaryIndex, FaceIndex, EdgeIndex, CellIndex, VertexIndex, AbstractTopology, EntityNeighborhood, - AbstractCell, boundaryfunction, faces, edges, vertices, nvertices, nfaces, nedges, + AbstractGrid, AbstractCell, boundaryfunction, faces, edges, vertices, nvertices, nfaces, nedges, cellnodes!, cellcoords!, getfieldnames, getfieldinterpolation, default_interpolation, reference_coordinates, value, getrefshape, dof_range, getfielddim, @@ -18,6 +18,7 @@ include("utils.jl") include("CoverTopology.jl") +include("Entity.jl") include("SharedEntity.jl") include("interface.jl") diff --git a/src/NODDofHandler.jl b/src/NODDofHandler.jl index 844ee93..30b4a34 100644 --- a/src/NODDofHandler.jl +++ b/src/NODDofHandler.jl @@ -163,31 +163,34 @@ function compute_dof_ownership(dh::NODDofHandler) fill!(dof_owner, my_rank) for sv ∈ get_shared_vertices(dgrid) - lvi = sv.local_idx - for field_idx in 1:num_fields(dh) - if has_vertex_dofs(dh, field_idx, lvi) - local_dofs = vertex_dofs(dh, field_idx, lvi) - dof_owner[local_dofs] .= compute_owner(dgrid, sv) + for lvi ∈ local_entities(sv) + for field_idx in 1:num_fields(dh) + if has_vertex_dofs(dh, field_idx, lvi) + local_dofs = vertex_dofs(dh, field_idx, lvi) + dof_owner[local_dofs] .= compute_owner(dgrid, sv) + end end end end for sf ∈ get_shared_faces(dgrid) - lfi = sf.local_idx - for field_idx in 1:num_fields(dh) - if has_face_dofs(dh, field_idx, lfi) - local_dofs = face_dofs(dh, field_idx, lfi) - dof_owner[local_dofs] .= compute_owner(dgrid, sf) + for lfi ∈ local_entities(sf) + for field_idx in 1:num_fields(dh) + if has_face_dofs(dh, field_idx, lfi) + local_dofs = face_dofs(dh, field_idx, lfi) + dof_owner[local_dofs] .= compute_owner(dgrid, sf) + end end end end for se ∈ get_shared_edges(dgrid) - lei = se.local_idx - for field_idx in 1:num_fields(dh) - if has_edge_dofs(dh, field_idx, lei) - local_dofs = edge_dofs(dh, field_idx, lei) - dof_owner[local_dofs] .= compute_owner(dgrid, se) + for lei ∈ local_entities(se) + for field_idx in 1:num_fields(dh) + if has_edge_dofs(dh, field_idx, lei) + local_dofs = edge_dofs(dh, field_idx, lei) + dof_owner[local_dofs] .= compute_owner(dgrid, se) + end end end end @@ -384,14 +387,14 @@ function local_to_global_numbering(dh::NODDofHandler{dim}) where {dim} faces_send[remote_rank] = FaceIndex[] end Ferrite.@debug println(" prepare sending face #$(lfi) to $remote_rank (R$my_rank)") - for i ∈ svs + # for i ∈ svs push!(faces_send[remote_rank],lfi) - end + # end elseif master_rank == remote_rank # 
dof is owned by remote - we have to receive information if !haskey(n_faces_recv,remote_rank) - n_faces_recv[remote_rank] = length(svs) + n_faces_recv[remote_rank] = 1#length(svs) else - n_faces_recv[remote_rank] += length(svs) + n_faces_recv[remote_rank] += 1#length(svs) end Ferrite.@debug println(" prepare receiving face #$(lfi) from $remote_rank (R$my_rank)") end @@ -459,7 +462,7 @@ function local_to_global_numbering(dh::NODDofHandler{dim}) where {dim} remote_cell_vis = Array{Int64}(undef,n_vertices) next_buffer_idx = 1 for lvi ∈ vertices_send[remote_rank] - sv = dgrid.shared_vertices[lvi] + sv = get_shared_vertex(dgrid, lvi) @assert haskey(sv.remote_vertices, remote_rank) for (cvi, llvi) ∈ sv.remote_vertices[remote_rank][1:1] # Just don't ask :) remote_cells[next_buffer_idx] = cvi @@ -496,14 +499,16 @@ function local_to_global_numbering(dh::NODDofHandler{dim}) where {dim} remote_cells = Array{Int64}(undef,n_faces) remote_cell_vis = Array{Int64}(undef,n_faces) next_buffer_idx = 1 - for lvi ∈ faces_send[remote_rank] - sv = dgrid.shared_faces[lvi] - @assert haskey(sv.remote_faces, remote_rank) - for (cvi, llvi) ∈ sv.remote_faces[remote_rank][1:1] # Just don't ask :) + for lfi ∈ faces_send[remote_rank] + sv = get_shared_face(dgrid, lfi) + remote_faces = remote_entities(sv) + @assert haskey(remote_faces, remote_rank) + # for (cvi, llfi) ∈ remote_faces[remote_rank] + (cvi, llfi) =remote_faces[remote_rank] remote_cells[next_buffer_idx] = cvi - remote_cell_vis[next_buffer_idx] = llvi + remote_cell_vis[next_buffer_idx] = llfi next_buffer_idx += 1 - end + # end end MPI.Send(remote_cells, global_comm(dgrid); dest=remote_rank-1) MPI.Send(remote_cell_vis, global_comm(dgrid); dest=remote_rank-1) @@ -544,12 +549,12 @@ function local_to_global_numbering(dh::NODDofHandler{dim}) where {dim} remote_cells = Array{Int64}(undef,n_edges) remote_cell_vis = Array{Int64}(undef,n_edges) next_buffer_idx = 1 - for lvi ∈ edges_send_unique - sv = dgrid.shared_edges[lvi] + for lei ∈ edges_send_unique + sv = get_shared_edge(dgrid, lei) @assert haskey(sv.remote_edges, remote_rank) - for (cvi, llvi) ∈ sv.remote_edges[remote_rank][1:1] # Just don't ask :) + for (cvi, llei) ∈ sv.remote_edges[remote_rank][1:1] # Just don't ask :) remote_cells[next_buffer_idx] = cvi - remote_cell_vis[next_buffer_idx] = llvi + remote_cell_vis[next_buffer_idx] = llei next_buffer_idx += 1 end end diff --git a/src/NODGrid.jl b/src/NODGrid.jl index e441cad..36f0cb5 100644 --- a/src/NODGrid.jl +++ b/src/NODGrid.jl @@ -18,9 +18,9 @@ mutable struct NODGrid{dim,C<:AbstractCell,T<:Real} <: AbstractNODGrid{dim} local_grid::Grid{dim,C,T} # Local copies of the shared entities of the form (local index, (process id in grid_comm, remote index)) # The entities consistently contain their *Index, because faces and edges are not materialized. - shared_vertices::Dict{VertexIndex,SharedVertex} - shared_edges::Dict{EdgeIndex,SharedEdge} - shared_faces::Dict{FaceIndex,SharedFace} + shared_vertices::Dict{VertexRepresentation, SharedVertex} + shared_edges::Dict{EdgeRepresentation, SharedEdge} + shared_faces::Dict{FaceRepresentation, SharedFace} end """ @@ -69,10 +69,9 @@ end NODGrid(grid_comm::MPI.Comm, grid_to_distribute::Grid{dim,C,T}, grid_topology::CoverTopology, partitioning::Vector{<:Integer}) Construct a non-overlapping distributed grid from a grid with given topology and partitioning on a specified MPI communicator. 
- -""" -function NODGrid(grid_comm::MPI.Comm, grid_to_distribute::Grid{dim,C,T}, grid_topology::CoverTopology, partitioning::Vector{<:Integer}) where {dim,C,T} - n_cells_global = getncells(grid_to_distribute) +""" +function NODGrid(grid_comm::MPI.Comm, global_grid::Grid{dim,C,T}, grid_topology::CoverTopology, partitioning::Vector{<:Integer}) where {dim,C,T} + n_cells_global = getncells(global_grid) @assert n_cells_global > 0 "Please provide a non-empty input mesh." partmin,partmax = extrema(partitioning) @@ -83,7 +82,7 @@ function NODGrid(grid_comm::MPI.Comm, grid_to_distribute::Grid{dim,C,T}, grid_to # Start extraction of local grid # 1. Extract local cells - local_cells = getcells(grid_to_distribute)[[i for i ∈ 1:n_cells_global if partitioning[i] == my_rank]] + local_cells = getcells(global_grid)[[i for i ∈ 1:n_cells_global if partitioning[i] == my_rank]] @assert length(local_cells) > 0 # Cannot handle empty partitions yet # 2. Find unique nodes @@ -105,7 +104,7 @@ function NODGrid(grid_comm::MPI.Comm, grid_to_distribute::Grid{dim,C,T}, grid_to # 4. Extract local nodes local_nodes = Vector{Node{dim,T}}(undef,length(local_node_index_set)) begin - global_nodes = getnodes(grid_to_distribute) + global_nodes = getnodes(global_grid) for global_node_idx ∈ local_node_index_set local_node_idx = global_to_local_node_map[global_node_idx] local_nodes[local_node_idx] = global_nodes[global_node_idx] @@ -133,9 +132,9 @@ function NODGrid(grid_comm::MPI.Comm, grid_to_distribute::Grid{dim,C,T}, grid_to end cellsets = Dict{String,Set{Int}}() - for key ∈ keys(grid_to_distribute.cellsets) + for key ∈ keys(global_grid.cellsets) cellsets[key] = Set{Int}() # create empty set, so it does not crash during assembly - for global_cell_idx ∈ grid_to_distribute.cellsets[key] + for global_cell_idx ∈ global_grid.cellsets[key] if haskey(global_to_local_cell_map[my_rank], global_cell_idx) push!(cellsets[key], global_to_local_cell_map[my_rank][global_cell_idx]) end @@ -143,9 +142,9 @@ function NODGrid(grid_comm::MPI.Comm, grid_to_distribute::Grid{dim,C,T}, grid_to end nodesets = Dict{String,Set{Int}}() - for key ∈ keys(grid_to_distribute.nodesets) + for key ∈ keys(global_grid.nodesets) nodesets[key] = Set{Int}() # create empty set, so it does not crash during assembly - for global_node_idx ∈ grid_to_distribute.nodesets[key] + for global_node_idx ∈ global_grid.nodesets[key] if haskey(global_to_local_node_map, global_node_idx) push!(nodesets[key], global_to_local_node_map[global_node_idx]) end @@ -153,9 +152,9 @@ function NODGrid(grid_comm::MPI.Comm, grid_to_distribute::Grid{dim,C,T}, grid_to end facesets = Dict{String,Set{FaceIndex}}() - for key ∈ keys(grid_to_distribute.facesets) + for key ∈ keys(global_grid.facesets) facesets[key] = Set{FaceIndex}() # create empty set, so it does not crash during assembly - for (global_cell_idx, i) ∈ grid_to_distribute.facesets[key] + for (global_cell_idx, i) ∈ global_grid.facesets[key] if haskey(global_to_local_cell_map[my_rank], global_cell_idx) push!(facesets[key], FaceIndex(global_to_local_cell_map[my_rank][global_cell_idx], i)) end @@ -163,9 +162,9 @@ function NODGrid(grid_comm::MPI.Comm, grid_to_distribute::Grid{dim,C,T}, grid_to end edgesets = Dict{String,Set{EdgeIndex}}() - for key ∈ keys(grid_to_distribute.edgesets) + for key ∈ keys(global_grid.edgesets) edgesets[key] = Set{EdgeIndex}() # create empty set, so it does not crash during assembly - for (global_cell_idx, i) ∈ grid_to_distribute.edgesets[key] + for (global_cell_idx, i) ∈ global_grid.edgesets[key] if 
haskey(global_to_local_cell_map[my_rank], global_cell_idx) push!(edgesets[key], EdgeIndex(global_to_local_cell_map[my_rank][global_cell_idx], i)) end @@ -173,15 +172,25 @@ function NODGrid(grid_comm::MPI.Comm, grid_to_distribute::Grid{dim,C,T}, grid_to end vertexsets = Dict{String,Set{VertexIndex}}() - for key ∈ keys(grid_to_distribute.vertexsets) + for key ∈ keys(global_grid.vertexsets) vertexsets[key] = Set{VertexIndex}() # create empty set, so it does not crash during assembly - for (global_cell_idx, i) ∈ grid_to_distribute.vertexsets[key] + for (global_cell_idx, i) ∈ global_grid.vertexsets[key] if haskey(global_to_local_cell_map[my_rank], global_cell_idx) push!(vertexsets[key], VertexIndex(global_to_local_cell_map[my_rank][global_cell_idx], i)) end end end + # Invert lookup table for fast queries + local_to_global_cell_map = zeros(Int, length(local_cells)) + for (global_cellidx, local_cellidx) in global_to_local_cell_map[my_rank] + local_to_global_cell_map[local_cellidx] = global_cellidx + end + # Debug check + for (local_cellidx, global_cellidx) in enumerate(local_to_global_cell_map) + @assert global_cellidx != 0 "$local_cellidx not mapped to any global element. Aborting. (R$my_rank)" + end + local_grid = Grid( local_cells, local_nodes, @@ -192,84 +201,113 @@ function NODGrid(grid_comm::MPI.Comm, grid_to_distribute::Grid{dim,C,T}, grid_to vertexsets=vertexsets ) - shared_vertices = Dict{VertexIndex,SharedVertex}() - shared_edges = Dict{EdgeIndex,SharedEdge}() - shared_faces = Dict{FaceIndex,SharedFace}() - for (global_cell_idx,global_cell) ∈ enumerate(getcells(grid_to_distribute)) - if partitioning[global_cell_idx] == my_rank - # Vertex - for (i, _) ∈ enumerate(Ferrite.vertices(global_cell)) - cell_vertex = VertexIndex(global_cell_idx, i) - remote_vertices = Dict{Int,Vector{VertexIndex}}() - for other_vertex ∈ getneighborhood(grid_topology, grid_to_distribute, cell_vertex, true) - (global_cell_neighbor_idx, j) = other_vertex - other_rank = partitioning[global_cell_neighbor_idx] - if other_rank != my_rank - if Ferrite.toglobal(grid_to_distribute,cell_vertex) == Ferrite.toglobal(grid_to_distribute,other_vertex) - if !haskey(remote_vertices,other_rank) - remote_vertices[other_rank] = Vector(undef,0) - end - Ferrite.@debug println("Detected shared vertex $cell_vertex neighbor $other_vertex (R$my_rank)") - push!(remote_vertices[other_rank], VertexIndex(global_to_local_cell_map[other_rank][global_cell_neighbor_idx], j)) - end + # We use these to efficiently determine the unique vertices, faces and edges + shared_vertices = Dict{VertexRepresentation,SharedVertex}() + shared_edges = Dict{EdgeRepresentation,SharedEdge}() + shared_faces = Dict{FaceRepresentation,SharedFace}() + # TODO rewrite more efficiently by looping over the local boundary and check for the codim 1 entity if the global grid has an associated neighboring element + for (local_cell_idx,local_cell) ∈ enumerate(getcells(local_grid)) + global_cell_idx = local_to_global_cell_map[local_cell_idx] + global_cell = getcells(global_grid, global_cell_idx) + + # Vertex + for (vi, local_vertex_node) ∈ enumerate(Ferrite.vertices(local_cell)) + # If we have already visited the vertex we can just skip + local_vid = VertexRepresentation(local_vertex_node) + haskey(shared_vertices, local_vid) && continue + + # Note that by construction the cells in the global and local grid share the same orientation + cell_vertex = VertexIndex(global_cell_idx, vi) + # Stores local shared vertex index [1, num_local_shared_vertices] -> VertexIndex in local grid 
+                local_vertices = Vector{VertexIndex}()
+                # Stores for each neighboring rank local shared vertex index [1, num_local_shared_vertices_towards_rank] -> VertexIndex in local grid on remote
+                remote_vertices = Dict{Int,Vector{VertexIndex}}()
+                for global_neighbor_vertex ∈ getneighborhood(grid_topology, global_grid, cell_vertex, true)
+                    # Unpack VertexIndex
+                    (global_cell_neighbor_idx, neighbor_vi) = global_neighbor_vertex
+                    # Get rank of associated element
+                    neighbor_rank = partitioning[global_cell_neighbor_idx]
+                    # Store whether the neighbor is remote or not
+                    if neighbor_rank != my_rank
+                        if !haskey(remote_vertices,neighbor_rank)
+                            remote_vertices[neighbor_rank] = Vector(undef,0)
                         end
-                    end
-
-                if length(remote_vertices) > 0
-                    idx = VertexIndex(global_to_local_cell_map[my_rank][global_cell_idx], i)
-                    shared_vertices[idx] = SharedVertex(idx, remote_vertices)
+                        Ferrite.@debug println("Detected shared vertex $local_vid remote neighbor $global_neighbor_vertex on $neighbor_rank (R$my_rank)")
+                        push!(remote_vertices[neighbor_rank], VertexIndex(global_to_local_cell_map[neighbor_rank][global_cell_neighbor_idx], neighbor_vi))
+                    else
+                        Ferrite.@debug println("Detected shared vertex $local_vid local neighbor $global_neighbor_vertex (R$my_rank)")
+                        push!(local_vertices, VertexIndex(global_to_local_cell_map[my_rank][global_cell_neighbor_idx], neighbor_vi))
                     end
                 end

-            # Face
-            if dim > 1
-                for (i, _) ∈ enumerate(Ferrite.faces(global_cell))
-                    cell_face = FaceIndex(global_cell_idx, i)
-                    remote_faces = Dict{Int,Vector{FaceIndex}}()
-                    for other_face ∈ getneighborhood(grid_topology, grid_to_distribute, cell_face, true)
-                        (global_cell_neighbor_idx, j) = other_face
-                        other_rank = partitioning[global_cell_neighbor_idx]
-                        if other_rank != my_rank
-                            if Ferrite.toglobal(grid_to_distribute,cell_face) == Ferrite.toglobal(grid_to_distribute,other_face)
-                                if !haskey(remote_faces,other_rank)
-                                    remote_faces[other_rank] = Vector(undef,0)
-                                end
-                                Ferrite.@debug println("Detected shared face $cell_face neighbor $other_face (R$my_rank)")
-                                push!(remote_faces[other_rank], FaceIndex(global_to_local_cell_map[other_rank][global_cell_neighbor_idx], j))
-                            end
-                        end
-                    end
+            # Just store the information if there is an actual remote neighbor
+            if length(remote_vertices) > 0
+                shared_vertices[local_vid] = SharedVertex(local_vid, local_vertices, remote_vertices)
+            else
+                # local vertex: do nothing
+            end
+        end

-                    if length(remote_faces) > 0
-                        idx = FaceIndex(global_to_local_cell_map[my_rank][global_cell_idx], i)
-                        shared_faces[idx] = SharedFace(idx, remote_faces)
-                    end
+        # Face
+        if dim > 1
+            # Shared faces sit on the partition boundary, so each one is encountered at most once here (see the assertions below)
+            for (fi, local_face_nodes) ∈ enumerate(Ferrite.faces(local_cell))
+                # The face should also just have one real face neighbor in the topology
+                global_neighbor_faces = getneighborhood(grid_topology, global_grid, FaceIndex(global_cell_idx, fi), false)
+                length(global_neighbor_faces) == 0 && continue # True boundary
+                @assert length(global_neighbor_faces) == 1 "Face topology broken! (R$my_rank)"
+
+                # If we hit the same shared face twice in a local grid, then the grid must be broken, because the shared faces must be on the boundary and hence just associated to one local cell!
+                local_fid = FaceRepresentation(local_face_nodes)
+                @assert !haskey(shared_faces, local_fid) "Grid topology broken. Boundary face with multiple elements attached detected."
+
+                # Unpack face
+                (global_cell_neighbor_idx, neighbor_fi) = global_neighbor_faces[1]
+                neighbor_rank = partitioning[global_cell_neighbor_idx]
+                if neighbor_rank != my_rank
+                    # Construct local information for current and remote rank
+                    Ferrite.@debug println("Detected shared face $local_fid neighbor $(global_neighbor_faces[1]) on $neighbor_rank (R$my_rank)")
+                    lfi = FaceIndex(local_cell_idx, fi)
+                    rfi = Dict(Pair(neighbor_rank, FaceIndex(global_to_local_cell_map[neighbor_rank][global_cell_neighbor_idx], neighbor_fi)))
+                    shared_faces[local_fid] = SharedFace(local_fid, lfi, rfi)
+                else
+                    # local face: do nothing
                 end
             end
+        end

-            # Edge
-            if dim > 2
-                for (i, _) ∈ enumerate(Ferrite.edges(global_cell))
-                    cell_edge = EdgeIndex(global_cell_idx, i)
-                    remote_edges = Dict{Int,Vector{EdgeIndex}}()
-                    for other_edge ∈ getneighborhood(grid_topology, grid_to_distribute, cell_edge, true)
-                        (global_cell_neighbor_idx, j) = other_edge
-                        other_rank = partitioning[global_cell_neighbor_idx]
-                        if other_rank != my_rank
-                            if Ferrite.toglobal(grid_to_distribute,cell_edge) == Ferrite.toglobal(grid_to_distribute,other_edge)
-                                if !haskey(remote_edges,other_edge)
-                                    remote_edges[other_rank] = Vector(undef,0)
-                                end
-                                Ferrite.@debug println("Detected shared edge $cell_edge neighbor $other_edge (R$my_rank)")
-                                push!(remote_edges[other_rank], EdgeIndex(global_to_local_cell_map[other_rank][global_cell_neighbor_idx], j))
-                            end
+        # Edge
+        if dim > 2
+            for (ei, local_edge_nodes) ∈ enumerate(Ferrite.edges(local_cell))
+                # If we have already visited the edge we can just skip
+                local_eid = EdgeRepresentation(local_edge_nodes)
+                haskey(shared_edges, local_eid) && continue
+
+                # Note that by construction the cells in the global and local grid share the same orientation
+                cell_edge = EdgeIndex(global_cell_idx, ei)
+                # Stores local shared edge index [1, num_local_shared_edges] -> EdgeIndex in local grid
+                local_edges = Vector{EdgeIndex}()
+                # Stores for each neighboring rank local shared edge index [1, num_local_shared_edges_towards_rank] -> EdgeIndex in local grid on remote
+                remote_edges = Dict{Int,Vector{EdgeIndex}}()
+                for global_neighbor_edge ∈ getneighborhood(grid_topology, global_grid, cell_edge, true)
+                    # Unpack edge
+                    (global_cell_neighbor_idx, neighbor_ei) = global_neighbor_edge
+                    neighbor_rank = partitioning[global_cell_neighbor_idx]
+                    # Store whether the neighbor is remote or not
+                    if neighbor_rank != my_rank
+                        if !haskey(remote_edges, neighbor_rank)
+                            remote_edges[neighbor_rank] = Vector(undef,0)
                             end
+                        Ferrite.@debug println("Detected shared edge $local_eid remote neighbor $global_neighbor_edge on $neighbor_rank (R$my_rank)")
+                        push!(remote_edges[neighbor_rank], EdgeIndex(global_to_local_cell_map[neighbor_rank][global_cell_neighbor_idx], neighbor_ei))
+                    else
+                        Ferrite.@debug println("Detected shared edge $local_eid local neighbor $global_neighbor_edge (R$my_rank)")
+                        push!(local_edges, EdgeIndex(global_to_local_cell_map[my_rank][global_cell_neighbor_idx], neighbor_ei))
                         end
+                end

-                    if length(remote_edges) > 0
-                        idx = EdgeIndex(global_to_local_cell_map[my_rank][global_cell_idx], i)
-                        shared_edges[idx] = SharedEdge(idx, remote_edges)
-                    end
+                if length(remote_edges) > 0
+                    shared_edges[local_eid] = SharedEdge(local_eid, local_edges, remote_edges)
                 end
             end
         end
diff --git a/src/SharedEntity.jl b/src/SharedEntity.jl
index 5289200..649667a 100644
--- a/src/SharedEntity.jl
+++ b/src/SharedEntity.jl
@@ -3,7 +3,7 @@
 Supertype for shared entities.
""" -abstract type SharedEntity end +abstract type SharedEntity <: Entity end """ remote_entities(::SharedEntity) @@ -12,31 +12,36 @@ Get an iterable of pairs, containing the 1-based rank and a collection of Entity """ remote_entities(::SharedEntity) -# TODO the following three structs could be merged to one struct with type parameter. -# We might want to think about the design a bit. + """ SharedVertex <: SharedEntity A shared vertex induced by a local vertex index and all remote vertex indices on all remote ranks. """ struct SharedVertex <: SharedEntity - local_idx::VertexIndex + unique_local_representation::VertexRepresentation # Identify via node in grid + local_vertices::Vector{VertexIndex} remote_vertices::Dict{Int,Vector{VertexIndex}} end +@inline local_entities(sv::SharedVertex) = sv.local_vertices @inline remote_entities(sv::SharedVertex) = sv.remote_vertices + """ SharedFace <: SharedEntity A shared face induced by a local face index and all remote face indices on all remote ranks. """ struct SharedFace <: SharedEntity - local_idx::FaceIndex - remote_faces::Dict{Int,Vector{FaceIndex}} + unique_local_representation::FaceRepresentation + local_face::FaceIndex + remote_face::Dict{Int,FaceIndex} end -@inline remote_entities(sf::SharedFace) = sf.remote_faces +@inline local_entities(sf::SharedFace) = (sf.local_face,) +@inline remote_entities(sf::SharedFace) = sf.remote_face + """ SharedEdge <: SharedEntity @@ -44,8 +49,10 @@ end A shared edge induced by a local edge index and all remote edge indices on all remote ranks. """ struct SharedEdge <: SharedEntity - local_idx::EdgeIndex + unique_local_representation::EdgeRepresentation # Identify via node in grid + local_edges::Vector{EdgeIndex} remote_edges::Dict{Int,Vector{EdgeIndex}} end +@inline local_entities(se::SharedEdge) = se.local_edges @inline remote_entities(se::SharedEdge) = se.remote_edges diff --git a/src/VTK.jl b/src/VTK.jl index 213ad66..311c34b 100644 --- a/src/VTK.jl +++ b/src/VTK.jl @@ -1,5 +1,5 @@ """ -vtk_grid(::AbstractString, ::AbstractNODGrid{dim}; compress::Bool=true) + vtk_grid(::AbstractString, ::AbstractNODGrid{dim}; compress::Bool=true) Store the grid as a PVTK file. 
""" @@ -27,9 +27,11 @@ function vtk_shared_vertices(pvtk::WriteVTK.PVTKFile, dgrid::AbstractNODGrid) fill!(u, 0.0) for sv ∈ get_shared_vertices(dgrid) if haskey(sv.remote_vertices, rank) - (cellidx, i) = sv.local_idx - cell = getcells(dgrid, cellidx) - u[Ferrite.vertices(cell)[i]] = my_rank + for lfi ∈ local_entities(sv) + (cellidx, i) = lfi + cell = getcells(dgrid, cellidx) + u[Ferrite.vertices(cell)[i]] = my_rank + end end end vtk_point_data(pvtk.vtk, u, "shared vertices with $rank") @@ -45,11 +47,13 @@ function vtk_shared_faces(pvtk::WriteVTK.PVTKFile, dgrid::AbstractNODGrid) for rank ∈ 1:global_rank(dgrid) fill!(u, 0.0) for sf ∈ get_shared_faces(dgrid) - if haskey(sf.remote_faces, rank) - (cellidx, i) = sf.local_idx - cell = getcells(dgrid, cellidx) - facenodes = Ferrite.faces(cell)[i] - u[[facenodes...]] .= my_rank + if haskey(sf.remote_face, rank) + for lfi ∈ local_entities(sf) + (cellidx, i) = lfi + cell = getcells(dgrid, cellidx) + facenodes = Ferrite.faces(cell)[i] + u[[facenodes...]] .= my_rank + end end end vtk_point_data(pvtk.vtk, u, "shared faces with $rank") @@ -66,10 +70,12 @@ function vtk_shared_edges(pvtk::WriteVTK.PVTKFile, dgrid::AbstractNODGrid) fill!(u, 0.0) for se ∈ get_shared_edges(dgrid) if haskey(se.remote_edges, rank) - (cellidx, i) = se.local_idx - cell = getcells(dgrid, cellidx) - edgenodes = Ferrite.edges(cell)[i] - u[[edgenodes...]] .= my_rank + for lei ∈ local_entities(se) + (cellidx, i) = lei + cell = getcells(dgrid, cellidx) + edgenodes = Ferrite.edges(cell)[i] + u[[edgenodes...]] .= my_rank + end end end vtk_point_data(pvtk.vtk, u, "shared edges with $rank") diff --git a/src/interface.jl b/src/interface.jl index fbd729f..58e71d9 100644 --- a/src/interface.jl +++ b/src/interface.jl @@ -65,45 +65,57 @@ Get an interable over the shared faces. """ get_shared_vertex(::AbstractNODGrid, ::VertexIndex) + get_shared_vertex(::AbstractNODGrid, ::VertexRepresentation) -Get the shared vertex associated to the VertexIndex, if it exists. +Get the shared vertex associated to the vertex, if it exists. """ -@inline get_shared_vertex(dgrid::AbstractNODGrid, vi::VertexIndex) = dgrid.shared_vertices[vi] +@inline get_shared_vertex(dgrid::AbstractNODGrid, vi::VertexIndex) = get_shared_vertex(dgrid, representation(getlocalgrid(dgrid), vi)) +@inline get_shared_vertex(dgrid::AbstractNODGrid, vertex::VertexRepresentation) = dgrid.shared_vertices[vertex] """ get_shared_edge(::AbstractNODGrid, ::EdgeIndex) + get_shared_edge(::AbstractNODGrid, ::EdgeRepresentation) -Get the shared edge associated to the EdgeIndex, if it exists. +Get the shared edge associated to the edge, if it exists. """ -@inline get_shared_edge(dgrid::AbstractNODGrid, ei::EdgeIndex) = dgrid.shared_edges[ei] +@inline get_shared_edge(dgrid::AbstractNODGrid, ei::EdgeIndex) = get_shared_edge(dgrid, representation(getlocalgrid(dgrid), ei)) +@inline get_shared_edge(dgrid::AbstractNODGrid, edge::EdgeRepresentation) = dgrid.shared_edges[edge] """ get_shared_face(::AbstractNODGrid, ::FaceIndex) + get_shared_face(::AbstractNODGrid, ::FaceRepresentation) -Get the shared edge associated to the FaceIndex, if it exists. +Get the shared edge associated to the face, if it exists. 
""" -@inline get_shared_face(dgrid::AbstractNODGrid, fi::FaceIndex) = dgrid.shared_faces[fi] +@inline get_shared_face(dgrid::AbstractNODGrid, fi::FaceIndex) = get_shared_face(dgrid, representation(getlocalgrid(dgrid), fi)) +@inline get_shared_face(dgrid::AbstractNODGrid, face::FaceRepresentation) = dgrid.shared_faces[face] """ is_shared_vertex(::AbstractNODGrid, ::VertexIndex) + is_shared_vertex(::AbstractNODGrid, ::VertexRepresentation) Check if a VertexIndex is a shared vertex. """ -@inline is_shared_vertex(dgrid::AbstractNODGrid, vi::VertexIndex) = haskey(dgrid.shared_vertices, vi) +@inline is_shared_vertex(dgrid::AbstractNODGrid, vi::VertexIndex) = is_shared_vertex(dgrid, representation(getlocalgrid(dgrid), vi)) +@inline is_shared_vertex(dgrid::AbstractNODGrid, vertex::VertexRepresentation) = haskey(dgrid.shared_vertices, vertex) """ is_shared_edge(::AbstractNODGrid, ::EdgeIndex) + is_shared_edge(::AbstractNODGrid, ::EdgeRepresentation) Check if a EdgeIndex is a shared edge. """ -@inline is_shared_edge(dgrid::AbstractNODGrid, ei::EdgeIndex) = haskey(dgrid.shared_edges, ei) +@inline is_shared_edge(dgrid::AbstractNODGrid, ei::EdgeIndex) = is_shared_edge(dgrid, representation(getlocalgrid(dgrid), ei)) +@inline is_shared_edge(dgrid::AbstractNODGrid, edge::EdgeRepresentation) = haskey(dgrid.shared_edges, edge) """ is_shared_face(::AbstractNODGrid, ::FaceIndex) + is_shared_face(::AbstractNODGrid, ::FaceRepresentation) Check if a FaceIndex is a shared face. """ -@inline is_shared_face(dgrid::AbstractNODGrid, fi::FaceIndex) = haskey(dgrid.shared_faces, fi) +@inline is_shared_face(dgrid::AbstractNODGrid, fi::FaceIndex) = is_shared_face(dgrid, representation(getlocalgrid(dgrid), fi)) +@inline is_shared_face(dgrid::AbstractNODGrid, face::FaceRepresentation) = haskey(dgrid.shared_faces, face) """ getlocalgrid(::AbstractNODGrid) @@ -143,9 +155,9 @@ Get the representative local grid containing only a vanilla local grid. @inline Ferrite.getvertexsets(grid::AbstractNODGrid) = getvertexset(getlocalgrid(grid), setname) """ - extract_local_part!(u_ferrite::Vector, u_extension, dh::Ferrite.AbstractDofHandler) + extract_local_part!(u_ferrite::AbstractVector, u_extension, dh::Ferrite.AbstractDofHandler) Entry point for extensions to register a transfer function translating the solution representation of the extension to a Ferrite compatible vector. """ -extract_local_part!(u_ferrite::Vector, u_extension, dh::Ferrite.AbstractDofHandler) = error("Not implemented.") +extract_local_part!(u_ferrite::AbstractVector, u_extension, dh::Ferrite.AbstractDofHandler) = error("Not implemented.") diff --git a/src/utils.jl b/src/utils.jl index c1bb2b0..e6d03d4 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -7,7 +7,7 @@ toglobal(grid::AbstractGrid, vertexidx::Vector{FaceIndex}) -> Vector{Tuple{Int}} This function takes the local face representation (a `FaceIndex`) and looks up the unique global id (a tuple of `Int`). 
""" -Ferrite.toglobal(grid::Ferrite.AbstractGrid,faceidx::Ferrite.FaceIndex) = Ferrite.sortface(faces(getcells(grid,faceidx[1]))[faceidx[2]]) +Ferrite.toglobal(grid::Ferrite.AbstractGrid,faceidx::Ferrite.FaceIndex) = Ferrite.sortface_fast(faces(getcells(grid,faceidx[1]))[faceidx[2]]) Ferrite.toglobal(grid::Ferrite.AbstractGrid,faceidx::Vector{FaceIndex}) = unique(Ferrite.toglobal.((grid,),faceidx)) """ @@ -15,8 +15,32 @@ Ferrite.toglobal(grid::Ferrite.AbstractGrid,faceidx::Vector{FaceIndex}) = unique toglobal(grid::AbstractGrid, vertexidx::Vector{EdgeIndex}) -> Vector{Tuple{Int}} This function takes the local face representation (an `EdgeIndex`) and looks up the unique global id (a tuple of `Int`). """ -Ferrite.toglobal(grid::Ferrite.AbstractGrid,edgeidx::Ferrite.EdgeIndex) = Ferrite.sortedge(edges(getcells(grid,edgeidx[1]))[edgeidx[2]])[1] -Ferrite.toglobal(grid::Ferrite.AbstractGrid,edgeidx::Vector{Ferrite.EdgeIndex}) = unique(toglobal.((grid,),edgeidx)) +Ferrite.toglobal(grid::Ferrite.AbstractGrid,edgeidx::Ferrite.EdgeIndex) = Ferrite.sortedge_fast(edges(getcells(grid,edgeidx[1]))[edgeidx[2]]) +Ferrite.toglobal(grid::Ferrite.AbstractGrid,edgeidx::Vector{Ferrite.EdgeIndex}) = unique(Ferrite.toglobal.((grid,),edgeidx)) + +""" + representation(grid::AbstractGrid, vertexidx::VertexIndex) -> Int + representation(grid::AbstractGrid, vertexidx::AbstractVector{VertexIndex}) -> Vector{Tuple{Int}} +This function takes the local vertex representation (a `VertexIndex`) and looks up the unique global id. +""" +representation(grid::AbstractGrid,vertexidx::VertexIndex) = VertexRepresentation(Ferrite.toglobal(grid,vertexidx)) +representation(grid::AbstractGrid,vertexidx::AbstractVector{VertexIndex}) = VertexRepresentation.(unique(Ferrite.toglobal.((grid,),vertexidx))) + +""" + representation(grid::AbstractGrid, vertexidx::FaceIndex) -> Int + representation(grid::AbstractGrid, vertexidx::AbstractVector{FaceIndex}) -> Vector{Tuple{Int}} +This function takes the local face representation (a `FaceIndex`) and looks up the unique global id (a tuple of `Int`). +""" +representation(grid::AbstractGrid,faceidx::FaceIndex) = FaceRepresentation(faces(getcells(grid,faceidx[1]))[faceidx[2]]) +representation(grid::AbstractGrid,faceidx::AbstractVector{FaceIndex}) = FaceRepresentation.(Ferrite.toglobal.((grid,),faceidx)) + +""" + representation(grid::AbstractGrid, vertexidx::EdgeIndex) -> Int + representation(grid::AbstractGrid, vertexidx::AbstractVector{EdgeIndex}) -> Vector{Tuple{Int}} +This function takes the local edge representation (an `EdgeIndex`) and looks up the unique global id (a tuple of `Int`). 
+""" +representation(grid::AbstractGrid,edgeidx::EdgeIndex) = EdgeRepresentation(edges(getcells(grid,edgeidx[1]))[edgeidx[2]]) +representation(grid::Ferrite.AbstractGrid,edgeidx::Vector{EdgeIndex}) = EdgeRepresentation.(Ferrite.toglobal.((grid,),edgeidx)) # ------------------------------------ diff --git a/test/test_distributed_impl_2.jl b/test/test_distributed_impl_2.jl index f5c0f6c..7ae5232 100644 --- a/test/test_distributed_impl_2.jl +++ b/test/test_distributed_impl_2.jl @@ -20,6 +20,7 @@ end # Edges @test length(get_shared_edges(dgrid)) == 4 function check_edge_correctly_shared_1(idx_local, idx_nonlocal) + @test is_shared_edge(dgrid, idx_local) se = get_shared_edge(dgrid, idx_local) @test FerriteDistributed.remote_entities(se) == Dict(2 => [idx_nonlocal]) end @@ -30,12 +31,14 @@ end # Faces @test length(get_shared_faces(dgrid)) == 1 + @test is_shared_face(dgrid, FaceIndex(1,5)) sf = get_shared_face(dgrid, FaceIndex(1,5)) - @test FerriteDistributed.remote_entities(sf) == Dict(2 => [FaceIndex(1,3)]) + @test FerriteDistributed.remote_entities(sf) == Dict(2 => FaceIndex(1,3)) elseif my_rank == 2 # Edges @test length(get_shared_edges(dgrid)) == 4 function check_edge_correctly_shared_2(idx_nonlocal, idx_local) + @test is_shared_edge(dgrid, idx_local) se = get_shared_edge(dgrid, idx_local) @test FerriteDistributed.remote_entities(se) == Dict(1 => [idx_nonlocal]) end @@ -46,8 +49,9 @@ end # Faces @test length(get_shared_faces(dgrid)) == 1 + @test is_shared_face(dgrid, FaceIndex(1,3)) sf = get_shared_face(dgrid, FaceIndex(1,3)) - @test FerriteDistributed.remote_entities(sf) == Dict(1 => [FaceIndex(1,5)]) + @test FerriteDistributed.remote_entities(sf) == Dict(1 => FaceIndex(1,5)) end MPI.Finalize() end diff --git a/test/test_distributed_impl_3.jl b/test/test_distributed_impl_3.jl index e143029..9d790fc 100644 --- a/test/test_distributed_impl_3.jl +++ b/test/test_distributed_impl_3.jl @@ -1,7 +1,7 @@ using FerriteDistributed using Test -import FerriteDistributed: CoverTopology, global_rank +import FerriteDistributed: CoverTopology, global_rank, representation MPI.Init() @testset "MPI setup 3" begin @@ -50,31 +50,31 @@ end if my_rank == 1 lgrid = getlocalgrid(dgrid) @test getncells(lgrid) == 1 - non_shared_vertices = [VertexIndex(1,1)] - non_shared_faces = [FaceIndex(1,1), FaceIndex(1,4)] + non_shared_vertices = representation(dgrid, [VertexIndex(1,1)]) + non_shared_faces = representation(dgrid, [FaceIndex(1,1), FaceIndex(1,4)]) elseif my_rank == 2 lgrid = getlocalgrid(dgrid) @test getncells(lgrid) == 1 - non_shared_vertices = [VertexIndex(1,3)] - non_shared_faces = [FaceIndex(1,2), FaceIndex(1,3)] + non_shared_vertices = representation(dgrid, [VertexIndex(1,3)]) + non_shared_faces = representation(dgrid, [FaceIndex(1,2), FaceIndex(1,3)]) elseif my_rank == 3 lgrid = getlocalgrid(dgrid) @test getncells(lgrid) == 2 - non_shared_vertices = [VertexIndex(1,2), VertexIndex(2,4)] - non_shared_faces = [FaceIndex(1,1), FaceIndex(1,2), FaceIndex(2,3), FaceIndex(2,4)] + non_shared_vertices = representation(dgrid, [VertexIndex(1,2), VertexIndex(2,4)]) + non_shared_faces = representation(dgrid, [FaceIndex(1,1), FaceIndex(1,2), FaceIndex(2,3), FaceIndex(2,4)]) else # Abstract machine or memory corruption during exectution above. 
@test false end for sv ∈ get_shared_vertices(dgrid) - @test sv.local_idx ∉ Set(non_shared_vertices) + @test sv.unique_local_representation ∉ Set(non_shared_vertices) end for v ∈ non_shared_vertices @test !is_shared_vertex(dgrid, v) end for sf ∈ get_shared_faces(dgrid) - @test sf.local_idx ∉ Set(non_shared_faces) + @test sf.unique_local_representation ∉ Set(non_shared_faces) end for f ∈ non_shared_faces @test !is_shared_face(dgrid, f) @@ -117,40 +117,40 @@ end if my_rank == 1 lgrid = getlocalgrid(dgrid) @test getncells(lgrid) == 1 - non_shared_vertices = [VertexIndex(1,1), VertexIndex(1,5)] - non_shared_faces = [FaceIndex(1,1), FaceIndex(1,6), FaceIndex(1,2), FaceIndex(1,5)] - non_shared_edges = [EdgeIndex(1,1), EdgeIndex(1,4), EdgeIndex(1,5), EdgeIndex(1,8), EdgeIndex(1,9)] + non_shared_vertices = representation(dgrid, [VertexIndex(1,1), VertexIndex(1,5)]) + non_shared_faces = representation(dgrid, [FaceIndex(1,1), FaceIndex(1,6), FaceIndex(1,2), FaceIndex(1,5)]) + non_shared_edges = representation(dgrid, [EdgeIndex(1,1), EdgeIndex(1,4), EdgeIndex(1,5), EdgeIndex(1,8), EdgeIndex(1,9)]) elseif my_rank == 2 lgrid = getlocalgrid(dgrid) @test getncells(lgrid) == 1 - non_shared_vertices = [VertexIndex(1,3), VertexIndex(1,7)] - non_shared_faces = [FaceIndex(1,1), FaceIndex(1,6), FaceIndex(1,3), FaceIndex(1,4)] - non_shared_edges = [EdgeIndex(1,2), EdgeIndex(1,6), EdgeIndex(1,3), EdgeIndex(1,7), EdgeIndex(1,11)] + non_shared_vertices = representation(dgrid, [VertexIndex(1,3), VertexIndex(1,7)]) + non_shared_faces = representation(dgrid, [FaceIndex(1,1), FaceIndex(1,6), FaceIndex(1,3), FaceIndex(1,4)]) + non_shared_edges = representation(dgrid, [EdgeIndex(1,2), EdgeIndex(1,6), EdgeIndex(1,3), EdgeIndex(1,7), EdgeIndex(1,11)]) elseif my_rank == 3 lgrid = getlocalgrid(dgrid) @test getncells(lgrid) == 2 - non_shared_vertices = [VertexIndex(1,2), VertexIndex(1,6), VertexIndex(2,4), VertexIndex(2,8)] - non_shared_faces = [FaceIndex(1,1), FaceIndex(1,6), FaceIndex(1,2), FaceIndex(1,3), FaceIndex(2,1), FaceIndex(2,6), FaceIndex(2,5), FaceIndex(2,4)] - non_shared_edges = [EdgeIndex(1,1), EdgeIndex(1,5), EdgeIndex(1,2), EdgeIndex(1,6), EdgeIndex(1,10), EdgeIndex(2,4), EdgeIndex(2,8), EdgeIndex(2,3), EdgeIndex(2,7), EdgeIndex(2,12)] + non_shared_vertices = representation(dgrid, [VertexIndex(1,2), VertexIndex(1,6), VertexIndex(2,4), VertexIndex(2,8)]) + non_shared_faces = representation(dgrid, [FaceIndex(1,1), FaceIndex(1,6), FaceIndex(1,2), FaceIndex(1,3), FaceIndex(2,1), FaceIndex(2,6), FaceIndex(2,5), FaceIndex(2,4)]) + non_shared_edges = representation(dgrid, [EdgeIndex(1,1), EdgeIndex(1,5), EdgeIndex(1,2), EdgeIndex(1,6), EdgeIndex(1,10), EdgeIndex(2,4), EdgeIndex(2,8), EdgeIndex(2,3), EdgeIndex(2,7), EdgeIndex(2,12)]) else # Abstract machine or memory corruption during exectution above. 
@test false end for sv ∈ get_shared_vertices(dgrid) - @test sv.local_idx ∉ Set(non_shared_vertices) + @test sv.unique_local_representation ∉ Set(non_shared_vertices) end for v ∈ non_shared_vertices @test !is_shared_vertex(dgrid, v) end for sf ∈ get_shared_faces(dgrid) - @test sf.local_idx ∉ Set(non_shared_faces) + @test sf.unique_local_representation ∉ Set(non_shared_faces) end for f ∈ non_shared_faces @test !is_shared_face(dgrid, f) end for se ∈ get_shared_edges(dgrid) - @test se.local_idx ∉ Set(non_shared_edges) + @test se.unique_local_representation ∉ Set(non_shared_edges) end for e ∈ non_shared_edges @test !is_shared_edge(dgrid, e) diff --git a/test/test_distributed_impl_5.jl b/test/test_distributed_impl_5.jl index 173ccd3..989e25e 100644 --- a/test/test_distributed_impl_5.jl +++ b/test/test_distributed_impl_5.jl @@ -21,7 +21,7 @@ end global_topology = CoverTopology(global_grid) dgrid = NODGrid(comm, global_grid, global_topology, [3,3,4,2,5,4,1,2,5]) my_rank = global_rank(dgrid) - + dh = DofHandler(dgrid) add!(dh, :u, ip) close!(dh);
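
The NODGrid hunks above key the shared-entity dictionaries by a node-based representation instead of a cell-local `VertexIndex`/`EdgeIndex`/`FaceIndex`, so that all cell-local indices referring to the same physical entity collapse onto a single entry. A minimal, self-contained sketch of that keying idea, using sorted node tuples as a stand-in for the `VertexRepresentation`/`EdgeRepresentation`/`FaceRepresentation` types defined in Entity.jl:

    # Illustration only: plain sorted node tuples stand in for the *Representation types.
    # Two quadrilaterals share the edge between nodes 2 and 5; both cell-local edges
    # map to the same key, so the shared entity is stored exactly once.
    cell_edges = Dict(
        1 => [(1, 2), (2, 5), (5, 4), (4, 1)],  # edges of cell 1 as node pairs
        2 => [(2, 3), (3, 6), (6, 5), (5, 2)],  # edges of cell 2 as node pairs
    )

    edge_key(nodes) = Tuple(sort(collect(nodes)))  # orientation-independent representation

    owners = Dict{NTuple{2,Int},Vector{Tuple{Int,Int}}}()  # key -> [(cell, local edge index), ...]
    for (cell, edges) in cell_edges, (lei, nodes) in enumerate(edges)
        push!(get!(owners, edge_key(nodes), Tuple{Int,Int}[]), (cell, lei))
    end

    @assert length(owners[(2, 5)]) == 2  # (2, 5) from cell 1 and (5, 2) from cell 2 collapse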
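
A usage sketch of the reworked query interface, modelled on test_distributed_impl_2.jl; the grid, the partitioning vector, and the two-rank launch are illustrative assumptions rather than part of the diff:

    # Run with two MPI ranks, e.g. `mpiexecjl -n 2 julia shared_faces.jl` (assumed setup).
    using Ferrite, FerriteDistributed
    import FerriteDistributed: CoverTopology, global_rank

    MPI.Init()
    global_grid = generate_grid(Hexahedron, (2, 1, 1))  # assumed example grid
    dgrid = NODGrid(MPI.COMM_WORLD, global_grid, CoverTopology(global_grid), [1, 2])
    my_rank = global_rank(dgrid)

    # *Index based queries forward to the representation based ones internally.
    fi = FaceIndex(1, 5)
    if is_shared_face(dgrid, fi)
        sf = get_shared_face(dgrid, fi)
        println("R$my_rank: local ", FerriteDistributed.local_entities(sf),
                " remote ", FerriteDistributed.remote_entities(sf))
    end
    MPI.Finalize()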
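
The DofHandler setup from test_distributed_impl_5.jl then continues from such a `dgrid` unchanged; only the interpolation has to match the cell type. A sketch that derives it with Ferrite's `default_interpolation` (imported in FerriteDistributed.jl above) instead of hard-coding one:

    # Continues from a constructed dgrid as in the previous sketch.
    ip = Ferrite.default_interpolation(typeof(getcells(getlocalgrid(dgrid), 1)))
    dh = DofHandler(dgrid)
    add!(dh, :u, ip)
    close!(dh)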