diff --git a/DESCRIPTION b/DESCRIPTION index 10c19cfc..28ff21df 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,7 +1,7 @@ Package: migraph Title: Inferential Methods for Multimodal and Other Networks -Version: 1.6.0 -Date: 2026-04-04 +Version: 1.6.1 +Date: 2026-04-14 Description: A set of tools for testing networks. It includes functions for univariate and multivariate conditional uniform graph and quadratic assignment procedure testing, diff --git a/NEWS.md b/NEWS.md index b095cbd7..612d868f 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,3 +1,24 @@ +# migraph 1.6.1 + +2026-04-13 + +## Package + +- Fixed logo sizing and location +- Added tests for `predict()` methods +- Added testing for `{netrics}` tutorials + +## Tutorials + +- Fixed `run_tute()` and `extract_tute()` to identify and use tutorials in `{netrics}` +- Updated diversity tutorial with new netrics names +- Updated diffusion tutorial with new netrics names +- Updated ergm tutorial with new netrics names + +## Testing + +- Fixed how `test_fit()` handles diffusion models and non-equal length step results + # migraph 1.6.0 2026-04-04 diff --git a/R/model_distrib.R b/R/model_distrib.R index 9fb39165..40874cbd 100644 --- a/R/model_distrib.R +++ b/R/model_distrib.R @@ -57,10 +57,16 @@ test_distribution <- function(diff_model1, diff_model2){ #' @export test_fit <- function(diff_model, diff_models){ # make into method? 
x <- diff_model + if(is_graph(x)) x <- as_diffusion(x) y <- diff_models sim <- `0` <- NULL - sims <- y %>% dplyr::select(sim, t, I) - sims <- as.data.frame.matrix(stats::xtabs(I ~ sim + t, sims)) # tidyr::pivot_wider replacement + sims <- y %>% dplyr::select(sim, time, I) + if(max(x$time) < max(sims$time)){ + x <- dplyr::bind_rows(x, + dplyr::tibble(time = (max(x$time)+1):max(sims$time), + S = x$S[nrow(x)], I = x$I[nrow(x)])) + } + sims <- as.data.frame.matrix(stats::xtabs(I ~ sim + time, sims)) # tidyr::pivot_wider replacement sims <- sims[,colSums(stats::cov(sims))!=0] mah <- stats::mahalanobis(x$I[-1], colMeans(sims), stats::cov(sims)) pval <- pchisq(mah, df=length(x$I[-1]), lower.tail=FALSE) diff --git a/R/tutorial_run.R b/R/tutorial_run.R index 3c1af167..efad953c 100644 --- a/R/tutorial_run.R +++ b/R/tutorial_run.R @@ -18,7 +18,7 @@ #' @name tutorials NULL -stocnet <- c("manynet", "migraph", "autograph") +stocnet <- c("manynet", "migraph", "autograph", "netrics") #' @rdname tutorials #' @export @@ -33,12 +33,15 @@ run_tute <- function(tute) { dplyr::as_tibble(learnr::available_tutorials(package = avail_pkgs[p]), silent = TRUE) %>% dplyr::select(1:3) }) - dplyr::bind_rows(tutelist) %>% dplyr::arrange(name) %>% print() + dplyr::bind_rows(tutelist) %>% + dplyr::arrange(dplyr::across(dplyr::any_of("name"))) %>% + print() manynet::snet_info("You can run a tutorial by typing e.g `run_tute('tutorial1')` or `run_tute('Data')` into the console.") } else { try(learnr::run_tutorial(tute, "manynet"), silent = TRUE) - try(learnr::run_tutorial(tute, "migraph"), silent = TRUE) try(learnr::run_tutorial(tute, "autograph"), silent = TRUE) + try(learnr::run_tutorial(tute, "netrics"), silent = TRUE) + try(learnr::run_tutorial(tute, "migraph"), silent = TRUE) manynet::snet_info("Didn't find a direct match, so looking for close matches...") tutelist <- lapply(manynet::snet_progress_along(avail_pkgs, name = "Checking tutorials in stocnet packages"), function(p){ @@ -71,7 +74,9 @@ 
extract_tute <- function(tute) { dplyr::as_tibble(learnr::available_tutorials(package = avail_pkgs[p]), silent = TRUE) %>% dplyr::select(1:3) }) - dplyr::bind_rows(tutelist) %>% dplyr::arrange(name) %>% print() + dplyr::bind_rows(tutelist) %>% + dplyr::arrange(dplyr::across(dplyr::any_of("name"))) %>% + print() manynet::snet_info("You can extract the code from one of these tutorials by typing e.g `extract_tute('tutorial1')` into the console.") } else { thisRequires("knitr") @@ -80,9 +85,13 @@ extract_tute <- function(tute) { thisRequires("autograph") pth <- gsub("manynet", "autograph", pth) } + if(!dir.exists(pth)) { + thisRequires("netrics") + pth <- gsub("autograph", "netrics", pth) + } if(!dir.exists(pth)) { thisRequires("migraph") - pth <- gsub("autograph", "migraph", pth) + pth <- gsub("netrics", "migraph", pth) } knitr::purl(file.path(pth, list.files(pth, pattern = "*.Rmd")), documentation = 1) diff --git a/inst/migraph.png b/inst/migraph.png new file mode 100644 index 00000000..53e7af52 Binary files /dev/null and b/inst/migraph.png differ diff --git a/inst/tutorials/tutorial7/diffusion.Rmd b/inst/tutorials/tutorial7/diffusion.Rmd index b64be7e5..c5042758 100644 --- a/inst/tutorials/tutorial7/diffusion.Rmd +++ b/inst/tutorials/tutorial7/diffusion.Rmd @@ -172,18 +172,18 @@ and `graphs()` (at least by default) only graphs the first and last step. But there is an easier way. -Play these same diffusions again, this time nesting the call within `net_infection_complete()`. +Play these same diffusions again, this time nesting the call within `net_by_infection_complete()`. 
```{r completeinfection, exercise = TRUE, purl = FALSE, fig.width=9} -net_infection_complete(play_diffusion(create_ring(32, width = 2))) -net_infection_complete(play_diffusion(generate_random(32, 0.15))) -net_infection_complete(play_diffusion(generate_scalefree(32, 0.025))) -net_infection_complete(play_diffusion(generate_smallworld(32, 0.025))) +net_by_infection_complete(play_diffusion(create_ring(32, width = 2))) +net_by_infection_complete(play_diffusion(generate_random(32, 0.15))) +net_by_infection_complete(play_diffusion(generate_scalefree(32, 0.025))) +net_by_infection_complete(play_diffusion(generate_smallworld(32, 0.025))) ``` ```{r struct-interp, echo = FALSE, purl = FALSE} question("Does the structure of the network matter for whether and when a diffusion process completes?", - answer("No", message = "Take a closer look at the `net_infection_complete()` results. Are they all the same?"), + answer("No", message = "Take a closer look at the `net_by_infection_complete()` results. Are they all the same?"), answer("Yes", correct = TRUE, message = "We can see that the different structures have varying outcomes in terms of when the diffusion completes."), random_answer_order = TRUE, allow_retry = TRUE @@ -203,7 +203,7 @@ You can start the infection in California by specifying `seeds = 5`. us_diff <- play_diffusion(irps_usgeo, seeds = 5) plot(us_diff) graphr(us_diff) -net_infection_complete(us_diff) +net_by_infection_complete(us_diff) ``` What's happening here? Can you interpret this? 
@@ -483,9 +483,9 @@ We could use these on degree centrality, or perhaps some other kind of centralit sf <- generate_scalefree(32, 0.025) sf %>% as_tidygraph() %>% - mutate(degree = ifelse(node_is_max(node_degree(sf)) == TRUE, "max", - ifelse(node_is_min(node_degree(sf)) == TRUE, "min", "others"))) %>% - graphr(node_color = "degree") + guides(color = "legend") + labs(color = "degree") + mutate(degree = ifelse(node_is_max(node_by_degree(sf)) == TRUE, "max", + ifelse(node_is_min(node_by_degree(sf)) == TRUE, "min", "others"))) %>% + graphr(node_color = "degree") + guides(color = "legend") ``` ```{r scale, exercise = TRUE, purl = FALSE, exercise.setup="sf", fig.width=9} @@ -495,11 +495,11 @@ sf %>% ```{r scale-solution} plot(play_diffusion(sf, seeds = 10, steps = 10)) plot(play_diffusion(sf, seeds = node_is_random(sf), steps = 10)) -plot(play_diffusion(sf, seeds = node_is_max(node_degree(sf)), steps = 10)) -plot(play_diffusion(sf, seeds = node_is_min(node_degree(sf)), steps = 10)) +plot(play_diffusion(sf, seeds = node_is_max(node_by_degree(sf)), steps = 10)) +plot(play_diffusion(sf, seeds = node_is_min(node_by_degree(sf)), steps = 10)) # visualise diffusion in scalefree network -graphs(play_diffusion(sf, seeds = node_is_min(node_degree(sf)), steps = 10)) +graphs(play_diffusion(sf, seeds = node_is_min(node_by_degree(sf)), steps = 10)) graphs(play_diffusion(sf, seeds = 16, steps = 10), waves = 1:3) ``` @@ -713,14 +713,14 @@ It can be interpreted as follows: - Where $R_0$ = 1, the 'disease' will continue as endemic, if conditions allow. So how can we establish the $R_0$ here? -We can use `net_reproduction()`. +We can use `net_by_reproduction()`. 
```{r r0, exercise = TRUE, exercise.setup = "diffusions", fig.width=9} rd_diff <- play_diffusion(rando, transmissibility = 0.25, recovery = 0.05) plot(rd_diff) # R-nought -net_reproduction(rd_diff) -net_infection_total(rd_diff) +net_by_reproduction(rd_diff) +net_by_infection_total(rd_diff) ``` ```{r r0-interp, echo = FALSE, purl = FALSE} @@ -762,13 +762,13 @@ the `r gloss("Herd Immunity Threshold", "hit")` or HIT. HIT indicates the threshold at which the reduction of susceptible members of the network means that infections will no longer keep increasing, allowing herd immunity to be achieved. -`net_immunity()` gives us the proportion of the population that would need +`net_by_immunity()` gives us the proportion of the population that would need to be recovered or vaccinated for the network to have herd immunity. ```{r immunity, exercise = TRUE, exercise.setup = "r0"} # Herd Immunity Threshold -net_immunity(rd_diff) -net_immunity(rd_diff, normalized = FALSE) +net_by_immunity(rd_diff) +net_by_immunity(rd_diff, normalized = FALSE) ``` In this model, the HIT score indicates a good proportion of nodes in the network @@ -780,7 +780,7 @@ Ok, so let's try this strategy. rd_diff_vacc <- play_diffusion(rando, transmissibility = 0.25, recovery = 0.05, immune = 2:9) plot(rd_diff_vacc) -net_infection_total(rd_diff_vacc) +net_by_infection_total(rd_diff_vacc) ``` We can see that we more rapidly reach a situation in which vaccinated and @@ -968,12 +968,12 @@ Which are the highest eigenvector centrality nodes in this network? 
``` ```{r eigen-hint} -node_eigenvector(ison_networkers) +node_by_eigenvector(ison_networkers) ison_networkers %>% - mutate(who_to_convince = node_is_max(node_eigenvector(ison_networkers))) %>% + mutate(who_to_convince = node_is_max(node_by_eigenvector(ison_networkers))) %>% graphr(node_color = who_to_convince) beliefs2 <- rep(0, net_nodes(ison_networkers)) -beliefs2[node_is_max(node_eigenvector(ison_networkers))] <- 1 +beliefs2[node_is_max(node_by_eigenvector(ison_networkers))] <- 1 ison_networkers %>% mutate(beliefs = beliefs2) %>% graphr(node_color = "beliefs") (netlearn2 <- play_learning(ison_networkers, beliefs2)) plot(netlearn2) diff --git a/inst/tutorials/tutorial7/diffusion.html b/inst/tutorials/tutorial7/diffusion.html index 7a9ee72a..9459e1bf 100644 --- a/inst/tutorials/tutorial7/diffusion.html +++ b/inst/tutorials/tutorial7/diffusion.html @@ -13,7 +13,7 @@ - +
net_infection_complete().
+net_by_infection_complete().
net_infection_complete(play_diffusion(create_ring(32, width = 2)))
-net_infection_complete(play_diffusion(generate_random(32, 0.15)))
-net_infection_complete(play_diffusion(generate_scalefree(32, 0.025)))
-net_infection_complete(play_diffusion(generate_smallworld(32, 0.025)))
+net_by_infection_complete(play_diffusion(create_ring(32, width = 2)))
+net_by_infection_complete(play_diffusion(generate_random(32, 0.15)))
+net_by_infection_complete(play_diffusion(generate_scalefree(32, 0.025)))
+net_by_infection_complete(play_diffusion(generate_smallworld(32, 0.025)))
us_diff <- play_diffusion(irps_usgeo, seeds = 5)
plot(us_diff)
graphr(us_diff)
-net_infection_complete(us_diff)
+net_by_infection_complete(us_diff)
What’s happening here? Can you interpret this?
@@ -631,9 +631,9 @@sf <- generate_scalefree(32, 0.025)
sf %>%
as_tidygraph() %>%
- mutate(degree = ifelse(node_is_max(node_degree(sf)) == TRUE, "max",
- ifelse(node_is_min(node_degree(sf)) == TRUE, "min", "others"))) %>%
- graphr(node_color = "degree") + guides(color = "legend") + labs(color = "degree")
+ mutate(degree = ifelse(node_is_max(node_by_degree(sf)) == TRUE, "max",
+ ifelse(node_is_min(node_by_degree(sf)) == TRUE, "min", "others"))) %>%
+ graphr(node_color = "degree") + guides(color = "legend")
plot(play_diffusion(sf, seeds = 10, steps = 10))
plot(play_diffusion(sf, seeds = node_is_random(sf), steps = 10))
-plot(play_diffusion(sf, seeds = node_is_max(node_degree(sf)), steps = 10))
-plot(play_diffusion(sf, seeds = node_is_min(node_degree(sf)), steps = 10))
+plot(play_diffusion(sf, seeds = node_is_max(node_by_degree(sf)), steps = 10))
+plot(play_diffusion(sf, seeds = node_is_min(node_by_degree(sf)), steps = 10))
# visualise diffusion in scalefree network
-graphs(play_diffusion(sf, seeds = node_is_min(node_degree(sf)), steps = 10))
+graphs(play_diffusion(sf, seeds = node_is_min(node_by_degree(sf)), steps = 10))
graphs(play_diffusion(sf, seeds = 16, steps = 10), waves = 1:3)
So how can we establish the \(R_0\)
-here? We can use net_reproduction().
net_by_reproduction().
rd_diff <- play_diffusion(rando, transmissibility = 0.25, recovery = 0.05)
plot(rd_diff)
# R-nought
-net_reproduction(rd_diff)
-net_infection_total(rd_diff)
+net_by_reproduction(rd_diff)
+net_by_infection_total(rd_diff)
net_immunity() gives us the
+immunity to be achieved. net_by_immunity() gives us the
proportion of the population that would need to be recovered or
vaccinated for the network to have herd immunity.
# Herd Immunity Threshold
-net_immunity(rd_diff)
-net_immunity(rd_diff, normalized = FALSE)
+net_by_immunity(rd_diff)
+net_by_immunity(rd_diff, normalized = FALSE)
In this model, the HIT score indicates a good proportion of nodes in @@ -950,7 +950,7 @@
rd_diff_vacc <- play_diffusion(rando, transmissibility = 0.25, recovery = 0.05,
immune = 2:9)
plot(rd_diff_vacc)
-net_infection_total(rd_diff_vacc)
+net_by_infection_total(rd_diff_vacc)
We can see that we more rapidly reach a situation in which vaccinated @@ -1118,12 +1118,12 @@
node_eigenvector(ison_networkers)
+node_by_eigenvector(ison_networkers)
ison_networkers %>%
- mutate(who_to_convince = node_is_max(node_eigenvector(ison_networkers))) %>%
+ mutate(who_to_convince = node_is_max(node_by_eigenvector(ison_networkers))) %>%
graphr(node_color = who_to_convince)
beliefs2 <- rep(0, net_nodes(ison_networkers))
-beliefs2[node_is_max(node_eigenvector(ison_networkers))] <- 1
+beliefs2[node_is_max(node_by_eigenvector(ison_networkers))] <- 1
ison_networkers %>% mutate(beliefs = beliefs2) %>% graphr(node_color = "beliefs")
(netlearn2 <- play_learning(ison_networkers, beliefs2))
plot(netlearn2)
@@ -1536,7 +1536,7 @@ Glossary
learnr:::store_exercise_cache(structure(list(label = "completeinfection", global_setup = structure(c("library(learnr)",
"library(migraph)", "clear_glossary()", "knitr::opts_chunk$set(echo = FALSE)"
), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL,
- chunks = list(list(label = "completeinfection", code = "net_infection_complete(play_diffusion(create_ring(32, width = 2)))\nnet_infection_complete(play_diffusion(generate_random(32, 0.15)))\nnet_infection_complete(play_diffusion(generate_scalefree(32, 0.025)))\nnet_infection_complete(play_diffusion(generate_smallworld(32, 0.025)))",
+ chunks = list(list(label = "completeinfection", code = "net_by_infection_complete(play_diffusion(create_ring(32, width = 2)))\nnet_by_infection_complete(play_diffusion(generate_random(32, 0.15)))\nnet_by_infection_complete(play_diffusion(generate_scalefree(32, 0.025)))\nnet_by_infection_complete(play_diffusion(generate_smallworld(32, 0.025)))",
opts = list(label = "\"completeinfection\"", exercise = "TRUE",
purl = "FALSE", fig.width = "9"), engine = "r")),
code_check = NULL, error_check = NULL, check = NULL, solution = NULL,
@@ -1557,10 +1557,10 @@ Glossary
message = TRUE, render = NULL, ref.label = NULL, child = NULL,
engine = "r", split = FALSE, include = TRUE, purl = FALSE,
max.print = 1000, label = "completeinfection", exercise = TRUE,
- code = c("net_infection_complete(play_diffusion(create_ring(32, width = 2)))",
- "net_infection_complete(play_diffusion(generate_random(32, 0.15)))",
- "net_infection_complete(play_diffusion(generate_scalefree(32, 0.025)))",
- "net_infection_complete(play_diffusion(generate_smallworld(32, 0.025)))"
+ code = c("net_by_infection_complete(play_diffusion(create_ring(32, width = 2)))",
+ "net_by_infection_complete(play_diffusion(generate_random(32, 0.15)))",
+ "net_by_infection_complete(play_diffusion(generate_scalefree(32, 0.025)))",
+ "net_by_infection_complete(play_diffusion(generate_smallworld(32, 0.025)))"
), out.width.px = 864, out.height.px = 384, params.src = "completeinfection, exercise = TRUE, purl = FALSE, fig.width=9",
fig.num = 0, exercise.df_print = "paged", exercise.checker = "NULL"),
engine = "r", version = "4"), class = c("r", "tutorial_exercise"
@@ -1570,11 +1570,11 @@ Glossary
@@ -1676,19 +1676,19 @@ Glossary
@@ -1746,10 +1746,10 @@ Glossary
@@ -1810,10 +1810,10 @@ Glossary
@@ -1998,10 +1998,10 @@ Glossary
@@ -2104,10 +2104,10 @@ Glossary
@@ -2172,10 +2172,10 @@ Glossary
@@ -2202,7 +2202,7 @@ Glossary
learnr:::store_exercise_cache(structure(list(label = "sf", global_setup = structure(c("library(learnr)",
"library(migraph)", "clear_glossary()", "knitr::opts_chunk$set(echo = FALSE)"
), chunk_opts = list(label = "setup", include = FALSE)), setup = NULL,
- chunks = list(list(label = "sf", code = "sf <- generate_scalefree(32, 0.025)\nsf %>%\n as_tidygraph() %>%\n mutate(degree = ifelse(node_is_max(node_degree(sf)) == TRUE, \"max\",\n ifelse(node_is_min(node_degree(sf)) == TRUE, \"min\", \"others\"))) %>%\n graphr(node_color = \"degree\") + guides(color = \"legend\") + labs(color = \"degree\")",
+ chunks = list(list(label = "sf", code = "sf <- generate_scalefree(32, 0.025)\nsf %>%\n as_tidygraph() %>%\n mutate(degree = ifelse(node_is_max(node_by_degree(sf)) == TRUE, \"max\",\n ifelse(node_is_min(node_by_degree(sf)) == TRUE, \"min\", \"others\"))) %>%\n graphr(node_color = \"degree\") + guides(color = \"legend\")",
opts = list(label = "\"sf\"", exercise = "TRUE", fig.width = "9"),
engine = "r")), code_check = NULL, error_check = NULL,
check = NULL, solution = NULL, tests = NULL, options = list(
@@ -2222,9 +2222,9 @@ Glossary
message = TRUE, render = NULL, ref.label = NULL, child = NULL,
engine = "r", split = FALSE, include = TRUE, purl = TRUE,
max.print = 1000, label = "sf", exercise = TRUE, code = c("sf <- generate_scalefree(32, 0.025)",
- "sf %>%", " as_tidygraph() %>%", " mutate(degree = ifelse(node_is_max(node_degree(sf)) == TRUE, \"max\",",
- " ifelse(node_is_min(node_degree(sf)) == TRUE, \"min\", \"others\"))) %>%",
- " graphr(node_color = \"degree\") + guides(color = \"legend\") + labs(color = \"degree\")"
+ "sf %>%", " as_tidygraph() %>%", " mutate(degree = ifelse(node_is_max(node_by_degree(sf)) == TRUE, \"max\",",
+ " ifelse(node_is_min(node_by_degree(sf)) == TRUE, \"min\", \"others\"))) %>%",
+ " graphr(node_color = \"degree\") + guides(color = \"legend\")"
), out.width.px = 864, out.height.px = 384, params.src = "sf, exercise=TRUE, fig.width=9",
fig.num = 0, exercise.df_print = "paged", exercise.checker = "NULL"),
engine = "r", version = "4"), class = c("r", "tutorial_exercise"
@@ -2242,17 +2242,17 @@ Glossary
@@ -2325,8 +2325,8 @@ Glossary
@@ -2634,16 +2634,16 @@ Glossary
@@ -2719,7 +2719,7 @@ Glossary
chunks = list(list(label = "diffusions", code = "rando <- generate_random(32, 0.1)\ngraphr(rando)\nplot(play_diffusions(rando, transmissibility = 0.5, times = 5, steps = 10))",
opts = list(label = "\"diffusions\"", exercise = "TRUE",
fig.width = "9"), engine = "r"), list(label = "r0",
- code = "rd_diff <- play_diffusion(rando, transmissibility = 0.25, recovery = 0.05)\nplot(rd_diff)\n# R-nought\nnet_reproduction(rd_diff)\nnet_infection_total(rd_diff)",
+ code = "rd_diff <- play_diffusion(rando, transmissibility = 0.25, recovery = 0.05)\nplot(rd_diff)\n# R-nought\nnet_by_reproduction(rd_diff)\nnet_by_infection_total(rd_diff)",
opts = list(label = "\"r0\"", exercise = "TRUE", exercise.setup = "\"diffusions\"",
fig.width = "9"), engine = "r")), code_check = NULL,
error_check = NULL, check = NULL, solution = NULL, tests = NULL,
@@ -2741,8 +2741,8 @@ Glossary
engine = "r", split = FALSE, include = TRUE, purl = TRUE,
max.print = 1000, label = "r0", exercise = TRUE, exercise.setup = "diffusions",
code = c("rd_diff <- play_diffusion(rando, transmissibility = 0.25, recovery = 0.05)",
- "plot(rd_diff)", "# R-nought", "net_reproduction(rd_diff)",
- "net_infection_total(rd_diff)"), out.width.px = 864,
+ "plot(rd_diff)", "# R-nought", "net_by_reproduction(rd_diff)",
+ "net_by_infection_total(rd_diff)"), out.width.px = 864,
out.height.px = 384, params.src = "r0, exercise = TRUE, exercise.setup = \"diffusions\", fig.width=9",
fig.num = 0, exercise.df_print = "paged", exercise.checker = "NULL"),
engine = "r", version = "4"), class = c("r", "tutorial_exercise"
@@ -2752,16 +2752,16 @@ Glossary
@@ -2781,17 +2781,17 @@ Glossary
@@ -2819,14 +2819,14 @@ Glossary
@@ -3006,20 +3006,20 @@ Glossary
@@ -3081,27 +3081,27 @@ Glossary
@@ -3160,12 +3160,12 @@ Glossary
For this session, we’ll explore a couple of different datasets.
First, let’s examine
homogeneity/
-heterogeneity in the Marvel relationships dataset from
-{manynet}, ison_marvel_relationships. The
-dataset is quite complicated, so to make this simpler, let’s concentrate
-on:
{manynet}, fict_marvel. The dataset is quite
+complicated, so to make this simpler, let’s concentrate on:
to_uniplex(____, "relationship")
+# since the dataset is a 'signed' graph, we want to get just the
# positively signed ties to get the friendship graph
# (and lose the enmity relations)
to_unsigned(____, keep = "positive")
# to_giant() is a quick easy way to get the giant/main component
to_giant(____)
to_subgraph(____, Appearances >= mean(Appearances))
-# don't forget to assign the results!
-marvel_friends <- ____
+to_subgraph(____, Appearances >= mean(Appearances))
net_richness(____, ____)
+net_by_richness(____, ____)
net_richness(marvel_friends, "Gender")
-net_richness(marvel_friends, "PowerOrigin")
-net_richness(marvel_friends, "Attractive")
-net_richness(marvel_friends, "Rich")
-net_richness(marvel_friends, "Intellect")
+net_by_richness(marvel_friends, "Gender")
+net_by_richness(marvel_friends, "PowerOrigin")
+net_by_richness(marvel_friends, "Attractive")
+net_by_richness(marvel_friends, "Rich")
+net_by_richness(marvel_friends, "Intellect")
net_diversity(____, ____)
+net_by_diversity(____, ____)
net_diversity(marvel_friends, "Gender")
-net_diversity(marvel_friends, "PowerOrigin")
-net_diversity(marvel_friends, "Attractive")
-net_diversity(marvel_friends, "Rich")
-net_diversity(marvel_friends, "Intellect")
+net_by_diversity(marvel_friends, "Gender")
+net_by_diversity(marvel_friends, "PowerOrigin")
+net_by_diversity(marvel_friends, "Attractive")
+net_by_diversity(marvel_friends, "Rich")
+net_by_diversity(marvel_friends, "Intellect")
Looks like there is more diversity in terms of where these characters got their powers, whether they have significant intellectual powers, and @@ -338,9 +337,9 @@
over_membership(marvel_friends, net_diversity, "Gender",
+over_membership(marvel_friends, net_by_diversity, "Gender",
membership = "PowerOrigin")
-over_membership(marvel_friends, net_diversity, "Intellect",
+over_membership(marvel_friends, net_by_diversity, "Intellect",
membership = "Gender")
Note that the length of the vector returned as a result is the number @@ -380,7 +379,7 @@
net_heterophily()).
+net_by_heterophily()).
Check how homophilic three variables in the network are, “Gender”, “PowerOrigin”, and “Attractive”. Please assign the results so that we can use them later.
@@ -392,14 +391,14 @@net_heterophily(____, ____)
+net_by_heterophily(____, ____)
(obs.gender <- net_heterophily(marvel_friends, "Gender"))
-(obs.powers <- net_heterophily(marvel_friends, "PowerOrigin"))
-(obs.attract <- net_heterophily(marvel_friends, "Attractive"))
+(obs.gender <- net_by_heterophily(marvel_friends, "Gender"))
+(obs.powers <- net_by_heterophily(marvel_friends, "PowerOrigin"))
+(obs.attract <- net_by_heterophily(marvel_friends, "Attractive"))
rand.gender <- test_random(marvel_friends,
- net_heterophily, attribute = "Gender",
+ net_by_heterophily, attribute = "Gender",
times = 1000)
rand.power <- test_random(marvel_friends,
- net_heterophily, attribute = "PowerOrigin",
+ net_by_heterophily, attribute = "PowerOrigin",
times = 1000)
rand.attract <- test_random(marvel_friends,
- net_heterophily, attribute = "Attractive",
+ net_by_heterophily, attribute = "Attractive",
times = 1000)
plot(rand.gender) + ggtitle("CUG test results for 'Gender' attribute")
plot(rand.power) + ggtitle("CUG test results for 'PowerOrigin' attribute")
@@ -520,7 +519,7 @@ QAP tests
-graphr(generate_permutation(____, with_attr = TRUE), ____)
+graphr(to_permuted(____, with_attr = TRUE), ____)
QAP tests
labels = FALSE, node_size = 6,
node_color = "PowerOrigin",
node_shape = "Gender") + ggtitle("Original network")
-new <- graphr(generate_permutation(marvel_friends, with_attr = TRUE),
+new <- graphr(to_permuted(marvel_friends, with_attr = TRUE),
labels = FALSE, node_size = 6,
node_color = "PowerOrigin",
node_shape = "Gender") + ggtitle("Permuted network")
@@ -564,10 +563,10 @@ QAP tests
data-completion="1" data-diagnostics="1" data-startover="1"
data-lines="0" data-pipe="|>">
(perm.gender <- test_permutation(marvel_friends,
- net_heterophily, attribute = "Gender",
+ net_by_heterophily, attribute = "Gender",
times = 1000))
(perm.power <- test_permutation(marvel_friends,
- net_heterophily, attribute = "PowerOrigin",
+ net_by_heterophily, attribute = "PowerOrigin",
times = 1000))
(plot(rand.gender) + ggtitle("CUG test results for 'Gender' attribute") +
@@ -688,7 +687,8 @@ Network regression
-tidy(model1)
+model1
+tidy(model1)
glance(model1)
plot(model1)
@@ -854,7 +854,7 @@ Glossary
"Bravo!",
"Super!"),
encouragement = c("Bon effort"))
-marvel_friends <- to_unsigned(ison_marvel_relationships, keep = "positive")
+marvel_friends <- to_unsigned(to_uniplex(fict_marvel, "relationship"), keep = "positive")
marvel_friends <- to_giant(marvel_friends)
marvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))
@@ -909,7 +909,7 @@ Glossary
"learnr::random_phrases_add(language = \"en\", ", " praise = c(\"C'est génial!\",",
" \"Beau travail!\",", " \"Bravo!\",",
" \"Super!\"),", " encouragement = c(\"Bon effort\"))",
-"marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")",
+"marvel_friends <- to_unsigned(to_uniplex(fict_marvel, \"relationship\"), keep = \"positive\")",
"marvel_friends <- to_giant(marvel_friends)", "marvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))"
), chunk_opts = list(label = "setup", include = FALSE, purl = FALSE,
eval = TRUE)), setup = NULL, chunks = list(list(label = "friends",
@@ -971,7 +971,7 @@ Glossary
"learnr::random_phrases_add(language = \"en\", ", " praise = c(\"C'est génial!\",",
" \"Beau travail!\",", " \"Bravo!\",",
" \"Super!\"),", " encouragement = c(\"Bon effort\"))",
-"marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")",
+"marvel_friends <- to_unsigned(to_uniplex(fict_marvel, \"relationship\"), keep = \"positive\")",
"marvel_friends <- to_giant(marvel_friends)", "marvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))"
), chunk_opts = list(label = "setup", include = FALSE, purl = FALSE,
eval = TRUE)), setup = NULL, chunks = list(list(label = "plotfriends",
@@ -1033,16 +1033,16 @@ Glossary
"learnr::random_phrases_add(language = \"en\", ", " praise = c(\"C'est génial!\",",
" \"Beau travail!\",", " \"Bravo!\",",
" \"Super!\"),", " encouragement = c(\"Bon effort\"))",
-"marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")",
+"marvel_friends <- to_unsigned(to_uniplex(fict_marvel, \"relationship\"), keep = \"positive\")",
"marvel_friends <- to_giant(marvel_friends)", "marvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))"
), chunk_opts = list(label = "setup", include = FALSE, purl = FALSE,
eval = TRUE)), setup = NULL, chunks = list(list(label = "rich",
code = "net_node_attributes(marvel_friends)", opts = list(
label = "\"rich\"", exercise = "TRUE", purl = "FALSE"),
engine = "r")), code_check = NULL, error_check = NULL, check = NULL,
- solution = structure(c("net_richness(marvel_friends, \"Gender\")",
- "net_richness(marvel_friends, \"PowerOrigin\")", "net_richness(marvel_friends, \"Attractive\")",
- "net_richness(marvel_friends, \"Rich\")", "net_richness(marvel_friends, \"Intellect\")"
+ solution = structure(c("net_by_richness(marvel_friends, \"Gender\")",
+ "net_by_richness(marvel_friends, \"PowerOrigin\")", "net_by_richness(marvel_friends, \"Attractive\")",
+ "net_by_richness(marvel_friends, \"Rich\")", "net_by_richness(marvel_friends, \"Intellect\")"
), chunk_opts = list(label = "rich-solution")), tests = NULL,
options = list(eval = FALSE, echo = TRUE, results = "markup",
tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE,
@@ -1070,20 +1070,20 @@ Glossary
@@ -1128,15 +1128,15 @@ Glossary
"learnr::random_phrases_add(language = \"en\", ", " praise = c(\"C'est génial!\",",
" \"Beau travail!\",", " \"Bravo!\",",
" \"Super!\"),", " encouragement = c(\"Bon effort\"))",
-"marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")",
+"marvel_friends <- to_unsigned(to_uniplex(fict_marvel, \"relationship\"), keep = \"positive\")",
"marvel_friends <- to_giant(marvel_friends)", "marvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))"
), chunk_opts = list(label = "setup", include = FALSE, purl = FALSE,
eval = TRUE)), setup = NULL, chunks = list(list(label = "blau",
code = "", opts = list(label = "\"blau\"", exercise = "TRUE",
purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL,
- check = NULL, solution = structure(c("net_diversity(marvel_friends, \"Gender\")",
- "net_diversity(marvel_friends, \"PowerOrigin\")", "net_diversity(marvel_friends, \"Attractive\")",
- "net_diversity(marvel_friends, \"Rich\")", "net_diversity(marvel_friends, \"Intellect\")"
+ check = NULL, solution = structure(c("net_by_diversity(marvel_friends, \"Gender\")",
+ "net_by_diversity(marvel_friends, \"PowerOrigin\")", "net_by_diversity(marvel_friends, \"Attractive\")",
+ "net_by_diversity(marvel_friends, \"Rich\")", "net_by_diversity(marvel_friends, \"Intellect\")"
), chunk_opts = list(label = "blau-solution")), tests = NULL,
options = list(eval = FALSE, echo = TRUE, results = "markup",
tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE,
@@ -1191,14 +1191,14 @@ Glossary
"learnr::random_phrases_add(language = \"en\", ", " praise = c(\"C'est génial!\",",
" \"Beau travail!\",", " \"Bravo!\",",
" \"Super!\"),", " encouragement = c(\"Bon effort\"))",
-"marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")",
+"marvel_friends <- to_unsigned(to_uniplex(fict_marvel, \"relationship\"), keep = \"positive\")",
"marvel_friends <- to_giant(marvel_friends)", "marvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))"
), chunk_opts = list(label = "setup", include = FALSE, purl = FALSE,
eval = TRUE)), setup = NULL, chunks = list(list(label = "crossref",
code = "", opts = list(label = "\"crossref\"", exercise = "TRUE",
purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL,
- check = NULL, solution = structure(c("over_membership(marvel_friends, net_diversity, \"Gender\", ",
- " membership = \"PowerOrigin\")", "over_membership(marvel_friends, net_diversity, \"Intellect\", ",
+ check = NULL, solution = structure(c("over_membership(marvel_friends, net_by_diversity, \"Gender\", ",
+ " membership = \"PowerOrigin\")", "over_membership(marvel_friends, net_by_diversity, \"Intellect\", ",
" membership = \"Gender\")"), chunk_opts = list(
label = "crossref-solution")), tests = NULL, options = list(
eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE,
@@ -1253,15 +1253,15 @@ Glossary
"learnr::random_phrases_add(language = \"en\", ", " praise = c(\"C'est génial!\",",
" \"Beau travail!\",", " \"Bravo!\",",
" \"Super!\"),", " encouragement = c(\"Bon effort\"))",
-"marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")",
+"marvel_friends <- to_unsigned(to_uniplex(fict_marvel, \"relationship\"), keep = \"positive\")",
"marvel_friends <- to_giant(marvel_friends)", "marvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))"
), chunk_opts = list(label = "setup", include = FALSE, purl = FALSE,
eval = TRUE)), setup = NULL, chunks = list(list(label = "ei",
code = "", opts = list(label = "\"ei\"", exercise = "TRUE",
purl = "FALSE"), engine = "r")), code_check = NULL, error_check = NULL,
- check = NULL, solution = structure(c("(obs.gender <- net_heterophily(marvel_friends, \"Gender\"))",
- "(obs.powers <- net_heterophily(marvel_friends, \"PowerOrigin\")) ",
- "(obs.attract <- net_heterophily(marvel_friends, \"Attractive\")) "
+ check = NULL, solution = structure(c("(obs.gender <- net_by_heterophily(marvel_friends, \"Gender\"))",
+ "(obs.powers <- net_by_heterophily(marvel_friends, \"PowerOrigin\")) ",
+ "(obs.attract <- net_by_heterophily(marvel_friends, \"Attractive\")) "
), chunk_opts = list(label = "ei-solution")), tests = NULL,
options = list(eval = FALSE, echo = TRUE, results = "markup",
tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE,
@@ -1290,15 +1290,15 @@ Glossary
@@ -1344,7 +1344,7 @@ Glossary
"learnr::random_phrases_add(language = \"en\", ", " praise = c(\"C'est génial!\",",
" \"Beau travail!\",", " \"Bravo!\",",
" \"Super!\"),", " encouragement = c(\"Bon effort\"))",
-"marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")",
+"marvel_friends <- to_unsigned(to_uniplex(fict_marvel, \"relationship\"), keep = \"positive\")",
"marvel_friends <- to_giant(marvel_friends)", "marvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))"
), chunk_opts = list(label = "setup", include = FALSE, purl = FALSE,
eval = TRUE)), setup = NULL, chunks = list(list(label = "rando",
@@ -1352,11 +1352,11 @@ Glossary
purl = "FALSE", exercise.timelimit = "120", fig.width = "9"),
engine = "r")), code_check = NULL, error_check = NULL, check = NULL,
solution = structure(c("rand.gender <- test_random(marvel_friends, ",
- " net_heterophily, attribute = \"Gender\", ",
+ " net_by_heterophily, attribute = \"Gender\", ",
" times = 1000)", "rand.power <- test_random(marvel_friends, ",
- " net_heterophily, attribute = \"PowerOrigin\", ",
+ " net_by_heterophily, attribute = \"PowerOrigin\", ",
" times = 1000)", "rand.attract <- test_random(marvel_friends, ",
- " net_heterophily, attribute = \"Attractive\", ",
+ " net_by_heterophily, attribute = \"Attractive\", ",
" times = 1000)", "plot(rand.gender) + ggtitle(\"CUG test results for 'Gender' attribute\")",
"plot(rand.power) + ggtitle(\"CUG test results for 'PowerOrigin' attribute\")",
"plot(rand.attract) + ggtitle(\"CUG test results for 'Attractive' attribute\")"
@@ -1387,25 +1387,25 @@ Glossary
@@ -1453,7 +1453,7 @@ Glossary
"learnr::random_phrases_add(language = \"en\", ", " praise = c(\"C'est génial!\",",
" \"Beau travail!\",", " \"Bravo!\",",
" \"Super!\"),", " encouragement = c(\"Bon effort\"))",
-"marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")",
+"marvel_friends <- to_unsigned(to_uniplex(fict_marvel, \"relationship\"), keep = \"positive\")",
"marvel_friends <- to_giant(marvel_friends)", "marvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))"
), chunk_opts = list(label = "setup", include = FALSE, purl = FALSE,
eval = TRUE)), setup = NULL, chunks = list(list(label = "perm",
@@ -1462,7 +1462,7 @@ Glossary
error_check = NULL, check = NULL, solution = structure(c("old <- graphr(marvel_friends,",
" labels = FALSE, node_size = 6,", " node_color = \"PowerOrigin\",",
" node_shape = \"Gender\") + ggtitle(\"Original network\")",
- "new <- graphr(generate_permutation(marvel_friends, with_attr = TRUE),",
+ "new <- graphr(to_permuted(marvel_friends, with_attr = TRUE),",
" labels = FALSE, node_size = 6,", " node_color = \"PowerOrigin\",",
" node_shape = \"Gender\") + ggtitle(\"Permuted network\")",
"old + new"), chunk_opts = list(label = "perm-solution")),
@@ -1493,25 +1493,25 @@ Glossary
@@ -1559,20 +1559,20 @@ Glossary
"learnr::random_phrases_add(language = \"en\", ", " praise = c(\"C'est génial!\",",
" \"Beau travail!\",", " \"Bravo!\",",
" \"Super!\"),", " encouragement = c(\"Bon effort\"))",
-"marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")",
+"marvel_friends <- to_unsigned(to_uniplex(fict_marvel, \"relationship\"), keep = \"positive\")",
"marvel_friends <- to_giant(marvel_friends)", "marvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))"
), chunk_opts = list(label = "setup", include = FALSE, purl = FALSE,
- eval = TRUE)), setup = "rand.gender <- test_random(marvel_friends, \n net_heterophily, attribute = \"Gender\", \n times = 1000)\nrand.power <- test_random(marvel_friends, \n net_heterophily, attribute = \"PowerOrigin\", \n times = 1000)\nrand.attract <- test_random(marvel_friends, \n net_heterophily, attribute = \"Attractive\", \n times = 1000)\nplot(rand.gender) + ggtitle(\"CUG test results for 'Gender' attribute\")\nplot(rand.power) + ggtitle(\"CUG test results for 'PowerOrigin' attribute\")\nplot(rand.attract) + ggtitle(\"CUG test results for 'Attractive' attribute\")",
- chunks = list(list(label = "rando-solution", code = "rand.gender <- test_random(marvel_friends, \n net_heterophily, attribute = \"Gender\", \n times = 1000)\nrand.power <- test_random(marvel_friends, \n net_heterophily, attribute = \"PowerOrigin\", \n times = 1000)\nrand.attract <- test_random(marvel_friends, \n net_heterophily, attribute = \"Attractive\", \n times = 1000)\nplot(rand.gender) + ggtitle(\"CUG test results for 'Gender' attribute\")\nplot(rand.power) + ggtitle(\"CUG test results for 'PowerOrigin' attribute\")\nplot(rand.attract) + ggtitle(\"CUG test results for 'Attractive' attribute\")",
+ eval = TRUE)), setup = "rand.gender <- test_random(marvel_friends, \n net_by_heterophily, attribute = \"Gender\", \n times = 1000)\nrand.power <- test_random(marvel_friends, \n net_by_heterophily, attribute = \"PowerOrigin\", \n times = 1000)\nrand.attract <- test_random(marvel_friends, \n net_by_heterophily, attribute = \"Attractive\", \n times = 1000)\nplot(rand.gender) + ggtitle(\"CUG test results for 'Gender' attribute\")\nplot(rand.power) + ggtitle(\"CUG test results for 'PowerOrigin' attribute\")\nplot(rand.attract) + ggtitle(\"CUG test results for 'Attractive' attribute\")",
+ chunks = list(list(label = "rando-solution", code = "rand.gender <- test_random(marvel_friends, \n net_by_heterophily, attribute = \"Gender\", \n times = 1000)\nrand.power <- test_random(marvel_friends, \n net_by_heterophily, attribute = \"PowerOrigin\", \n times = 1000)\nrand.attract <- test_random(marvel_friends, \n net_by_heterophily, attribute = \"Attractive\", \n times = 1000)\nplot(rand.gender) + ggtitle(\"CUG test results for 'Gender' attribute\")\nplot(rand.power) + ggtitle(\"CUG test results for 'PowerOrigin' attribute\")\nplot(rand.attract) + ggtitle(\"CUG test results for 'Attractive' attribute\")",
opts = list(label = "\"rando-solution\""), engine = "r"),
list(label = "testperm", code = "", opts = list(label = "\"testperm\"",
exercise = "TRUE", exercise.setup = "\"rando-solution\"",
purl = "FALSE", exercise.timelimit = "120", fig.width = "9"),
engine = "r")), code_check = NULL, error_check = NULL,
check = NULL, solution = structure(c("(perm.gender <- test_permutation(marvel_friends, ",
- " net_heterophily, attribute = \"Gender\",",
+ " net_by_heterophily, attribute = \"Gender\",",
" times = 1000))", "(perm.power <- test_permutation(marvel_friends, ",
- " net_heterophily, attribute = \"PowerOrigin\",",
+ " net_by_heterophily, attribute = \"PowerOrigin\",",
" times = 1000))", "", "(plot(rand.gender) + ggtitle(\"CUG test results for 'Gender' attribute\") + ",
" theme(plot.title = element_text(size=8)) | ", " plot(rand.power) + ggtitle(\"CUG test results for 'PowerOrigin' attribute\") + ",
" theme(plot.title = element_text(size=8))) /", "(plot(perm.gender) + ggtitle(\"QAP test results for 'Gender' attribute\") + ",
@@ -1605,25 +1605,25 @@ Glossary
@@ -1671,7 +1671,7 @@ Glossary
"learnr::random_phrases_add(language = \"en\", ", " praise = c(\"C'est génial!\",",
" \"Beau travail!\",", " \"Bravo!\",",
" \"Super!\"),", " encouragement = c(\"Bon effort\"))",
-"marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")",
+"marvel_friends <- to_unsigned(to_uniplex(fict_marvel, \"relationship\"), keep = \"positive\")",
"marvel_friends <- to_giant(marvel_friends)", "marvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))"
), chunk_opts = list(label = "setup", include = FALSE, purl = FALSE,
eval = TRUE)), setup = NULL, chunks = list(list(label = "introeies",
@@ -1733,7 +1733,7 @@ Glossary
"learnr::random_phrases_add(language = \"en\", ", " praise = c(\"C'est génial!\",",
" \"Beau travail!\",", " \"Bravo!\",",
" \"Super!\"),", " encouragement = c(\"Bon effort\"))",
-"marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")",
+"marvel_friends <- to_unsigned(to_uniplex(fict_marvel, \"relationship\"), keep = \"positive\")",
"marvel_friends <- to_giant(marvel_friends)", "marvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))"
), chunk_opts = list(label = "setup", include = FALSE, purl = FALSE,
eval = TRUE)), setup = NULL, chunks = list(list(label = "qapmax",
@@ -1796,7 +1796,7 @@ Glossary
"learnr::random_phrases_add(language = \"en\", ", " praise = c(\"C'est génial!\",",
" \"Beau travail!\",", " \"Bravo!\",",
" \"Super!\"),", " encouragement = c(\"Bon effort\"))",
-"marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")",
+"marvel_friends <- to_unsigned(to_uniplex(fict_marvel, \"relationship\"), keep = \"positive\")",
"marvel_friends <- to_giant(marvel_friends)", "marvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))"
), chunk_opts = list(label = "setup", include = FALSE, purl = FALSE,
eval = TRUE)), setup = "model1 <- net_regression(weight ~ ego(Citations) + alter(Citations) + sim(Citations) + \n ego(Discipline) + same(Discipline), \n ison_networkers, times = 200)",
@@ -1805,16 +1805,16 @@ Glossary
list(label = "qapinterp", code = "", opts = list(label = "\"qapinterp\"",
exercise = "TRUE", exercise.setup = "\"qapmax-solution\"",
purl = "FALSE", fig.width = "9"), engine = "r")),
- code_check = NULL, error_check = NULL, check = NULL, solution = structure(c("tidy(model1)",
- "glance(model1)", "plot(model1)"), chunk_opts = list(label = "qapinterp-solution")),
- tests = NULL, options = list(eval = FALSE, echo = TRUE, results = "markup",
- tidy = FALSE, tidy.opts = NULL, collapse = FALSE, prompt = FALSE,
- comment = NA, highlight = FALSE, size = "normalsize",
- background = "#F7F7F7", strip.white = TRUE, cache = 0,
- cache.path = "diversity_cache/html/", cache.vars = NULL,
- cache.lazy = TRUE, dependson = NULL, autodep = FALSE,
- cache.rebuild = FALSE, fig.keep = "high", fig.show = "asis",
- fig.align = "default", fig.path = "diversity_files/figure-html/",
+ code_check = NULL, error_check = NULL, check = NULL, solution = structure(c("model1",
+ "tidy(model1)", "glance(model1)", "plot(model1)"), chunk_opts = list(
+ label = "qapinterp-solution")), tests = NULL, options = list(
+ eval = FALSE, echo = TRUE, results = "markup", tidy = FALSE,
+ tidy.opts = NULL, collapse = FALSE, prompt = FALSE, comment = NA,
+ highlight = FALSE, size = "normalsize", background = "#F7F7F7",
+ strip.white = TRUE, cache = 0, cache.path = "diversity_cache/html/",
+ cache.vars = NULL, cache.lazy = TRUE, dependson = NULL,
+ autodep = FALSE, cache.rebuild = FALSE, fig.keep = "high",
+ fig.show = "asis", fig.align = "default", fig.path = "diversity_files/figure-html/",
dev = "png", dev.args = NULL, dpi = 192, fig.ext = "png",
fig.width = 9, fig.height = 4, fig.env = "figure", fig.cap = NULL,
fig.scap = NULL, fig.lp = "fig:", fig.subcap = NULL,
@@ -1834,31 +1834,31 @@ Glossary
@@ -1904,7 +1904,7 @@ Glossary
"learnr::random_phrases_add(language = \"en\", ", " praise = c(\"C'est génial!\",",
" \"Beau travail!\",", " \"Bravo!\",",
" \"Super!\"),", " encouragement = c(\"Bon effort\"))",
-"marvel_friends <- to_unsigned(ison_marvel_relationships, keep = \"positive\")",
+"marvel_friends <- to_unsigned(to_uniplex(fict_marvel, \"relationship\"), keep = \"positive\")",
"marvel_friends <- to_giant(marvel_friends)", "marvel_friends <- marvel_friends %>% to_subgraph(Appearances >= mean(Appearances))"
), chunk_opts = list(label = "setup", include = FALSE, purl = FALSE,
eval = TRUE)), setup = NULL, chunks = list(list(label = "freeplay",
@@ -1936,12 +1936,12 @@ Glossary
diff --git a/inst/tutorials/tutorial9/ergm.html b/inst/tutorials/tutorial9/ergm.html
index a988109a..57cd9d6e 100644
--- a/inst/tutorials/tutorial9/ergm.html
+++ b/inst/tutorials/tutorial9/ergm.html
@@ -13,7 +13,7 @@
-
+
Modelling with ERGMs
@@ -139,10 +139,10 @@ Data
-data(package='ergm') # tells us the datasets in our packages
+# data(package='ergm') # tells us the datasets in our packages
data(florentine) # loads flomarriage and flobusiness data
flomarriage # Data on marriage alliances among Renaissance Florentine families
-?florentine # You can see more information about the attributes in the help file.
+# ?florentine # You can see more information about the attributes in the help file.
We see 16 nodes and 20 edges in this undirected network. Let’s get a
@@ -152,7 +152,7 @@
Data
-net_density(flomarriage)
+net_by_density(flomarriage)
net_node_attributes(flomarriage)
net_tie_attributes(flomarriage)
@@ -169,7 +169,7 @@ Visualisation
data-completion="1" data-diagnostics="1" data-startover="1"
data-lines="0" data-pipe="|>">
graphr(flomarriage, node_size = "wealth")
-as_tidygraph(flomarriage) %>% mutate_nodes(Degree = node_deg()) %>%
+as_tidygraph(flomarriage) %>% mutate_nodes(Degree = node_by_deg()) %>%
mutate_ties(Triangles = tie_is_triangular()) %>%
graphr(node_size = "Degree", edge_color = "Triangles")
@@ -357,7 +357,7 @@ Predicted Probabilities
one-unit change in \(X\) on \(Pr(Y_i = 1)\), how much change there is
depends critically on “where you are on the curve”.
-
+
This means we need to calculate predicted probabilities of \(Y\) at specific values of key predictors.
In our simple Bernoulli model, we’re modelling only the number of edges,
@@ -468,7 +468,7 @@
GOFing
Markov model
-
+
Let’s review what we know about the marriage network to get a better
fit. There are some nodes with more activity (higher degree) in this
network than others, and some triangle configurations appear. Yet our
@@ -811,6 +811,7 @@
Free play
@@ -1254,8 +1255,8 @@
Free play
@@ -2558,8 +2559,8 @@ Free play
diff --git a/inst/tutorials/tutorial9/ergm_files/figure-html/visflo2-1.png b/inst/tutorials/tutorial9/ergm_files/figure-html/visflo2-1.png
index 58bd1384..3fbf1f3e 100644
Binary files a/inst/tutorials/tutorial9/ergm_files/figure-html/visflo2-1.png and b/inst/tutorials/tutorial9/ergm_files/figure-html/visflo2-1.png differ
diff --git a/man/figures/logo.png b/man/figures/logo.png
index 0c58f72e..3ed230d5 100644
Binary files a/man/figures/logo.png and b/man/figures/logo.png differ
diff --git a/tests/testthat/test-model_distrib.R b/tests/testthat/test-model_distrib.R
index b41a1a91..70c8b481 100644
--- a/tests/testthat/test-model_distrib.R
+++ b/tests/testthat/test-model_distrib.R
@@ -3,3 +3,10 @@ test_that("test_distribution works", {
as_diffusion(play_diffusion(ison_networkers, thresholds = 75)))
expect_output(print(res), "statistic")
})
+
+test_that("test_fit works", {
+ x <- play_diffusion(generate_random(15), transmissibility = 0.7)
+ y <- play_diffusions(generate_random(15), transmissibility = 0.1, times = 40)
+ res <- test_fit(as_diffusion(x), y)
+ expect_output(print(res), "statistic")
+})
diff --git a/tests/testthat/test-model_predict.R b/tests/testthat/test-model_predict.R
new file mode 100644
index 00000000..1940ee1b
--- /dev/null
+++ b/tests/testthat/test-model_predict.R
@@ -0,0 +1,23 @@
+test_that("predict.netlm works", {
+ networkers <- ison_networkers %>% to_subgraph(Discipline == "Sociology")
+ model1 <- net_regression(weight ~ ego(Citations) + alter(Citations) + sim(Citations),
+ networkers, times = 10)
+ pred <- predict(model1, matrix(c(1,10,5,2),1,4))
+ expect_length(pred, 1)
+ expect_type(pred, "double")
+})
+
+test_that("predict.netlogit works", {
+ networkers <- ison_networkers %>% to_subgraph(Discipline == "Sociology") %>%
+ to_unweighted()
+ model1 <- net_regression(. ~ ego(Citations) + alter(Citations) + sim(Citations),
+ networkers, times = 10)
+ pred_link <- predict(model1, matrix(c(1,10,5,2),1,4), type = "link")
+ pred_response <- predict(model1, matrix(c(1,10,5,2),1,4), type = "response")
+
+ expect_length(pred_link, 1)
+ expect_type(pred_link, "double")
+
+ expect_length(pred_response, 1)
+ expect_type(pred_response, "double")
+})
diff --git a/tests/testthat/test-tutorials_netrics.R b/tests/testthat/test-tutorials_netrics.R
new file mode 100644
index 00000000..912f638a
--- /dev/null
+++ b/tests/testthat/test-tutorials_netrics.R
@@ -0,0 +1,8 @@
+test_that("netrics tutorials work", {
+  skip_if_not_installed("netrics")
+  for(tute.dir in list.dirs(system.file("tutorials", package = "netrics"),
+                            recursive = FALSE)){
+    tute.file <- list.files(tute.dir, pattern = "\\.Rmd$", full.names = TRUE)
+ expect_null(test_tutorials(tute.file))
+ }
+})
diff --git a/tests/testthat/test-tutorials_run.R b/tests/testthat/test-tutorials_run.R
new file mode 100644
index 00000000..fa838886
--- /dev/null
+++ b/tests/testthat/test-tutorials_run.R
@@ -0,0 +1,9 @@
+test_that("run_tute() runs without error when missing argument", {
+ skip_if_not_installed("learnr")
+ expect_error(run_tute(), NA)
+})
+
+test_that("extract_tute() runs without error when missing argument", {
+ skip_if_not_installed("learnr")
+ expect_error(extract_tute(), NA)
+})