# AHP survey analysis: per-respondent consistency filtering, aggregated
# priority weights (overall and by respondent category), rank correlations,
# and a Borich needs assessment / Locus-for-Focus plot.
# NOTE(review): `rm(list = ls())` / `gc()` removed — clearing the global
# environment inside a script is an anti-pattern; start a fresh R session
# instead.
# install.packages("ahpsurvey", repos = "http://cran.us.r-project.org")
library(ahpsurvey)
library(dplyr)

# Pairwise-comparison responses for the four AHP criterion sets.
df1 <- readxl::read_excel("k1.xlsx")
df2 <- readxl::read_excel("k2.xlsx")
df3 <- readxl::read_excel("k3.xlsx")
df4 <- readxl::read_excel("k4.xlsx")
# Respondent category labels (column `cat`), one row per respondent.
df_cat <- readxl::read_excel("df_cat.xlsx")
head(df1)
head(df2)
head(df3)
head(df4)

# Attribute (criterion) names for each of the four comparison sets.
atts1 <- c("inter", "inner", "actor")
atts2 <- c("inter", "foreign", "together", "safety")
atts3 <- c("capacity", "korean", "economic", "strategy", "location")
atts4 <- c("president", "officer", "assembly", "media")
# Individual pairwise-comparison matrices, one list element per respondent.
# negconvert = TRUE flips negative-coded (left-dominant) judgements.
df1ahp <- ahp.mat(df1, atts = atts1, negconvert = TRUE)
df2ahp <- ahp.mat(df2, atts = atts2, negconvert = TRUE)
df3ahp <- ahp.mat(df3, atts = atts3, negconvert = TRUE)
df4ahp <- ahp.mat(df4, atts = atts4, negconvert = TRUE)
# Consistency ratio (CR) per respondent, criterion set 1.
cr <- df1 %>%
  ahp.mat(atts1, negconvert = TRUE) %>%   # was negconvert = T; spell out TRUE
  ahp.cr(atts1)
table(cr <= 0.1)
## FALSE  TRUE
##    17     8

# Retain only respondents with CR <= 0.1 (Saaty's consistency threshold).
# (The original had the bare prose line "0.1 이하" here — a syntax error.)
thres <- 0.1
cr.df1 <- df1 %>%
  ahp.mat(atts1, negconvert = TRUE) %>%
  ahp.cr(atts1) %>%
  data.frame() %>%
  # `cr` inside mutate() resolves to the global vector computed above.
  mutate(rowid = seq_len(nrow(df1)),      # was hard-coded 1:25
         cr.dum = as.factor(ifelse(cr <= thres, 1, 0)))
cr.df1 %>%
  filter(cr.dum == 1)
cr1.df1 <- df1 %>%
  mutate(rowid = seq_len(nrow(df1)))
df1n <- inner_join(cr1.df1, cr.df1) %>%
  filter(cr.dum == 1) %>%
  select(1:3)

# Arithmetic-mean aggregated priority weights of the consistent subsample.
# (Renamed from `mean`, which shadowed base::mean.)
agg_pref1 <- df1n %>%
  ahp.mat(atts = atts1, negconvert = TRUE) %>%
  ahp.aggpref(atts1, method = "arithmetic")
agg_pref1
##     inter     inner     actor
## 0.4253290 0.1729622 0.4017087

# Mean CR of the retained respondents.
cr <- df1n %>%
  ahp.mat(atts1, negconvert = TRUE) %>%
  ahp.cr(atts1)
mean(cr)
## [1] 0.02400797
# Consistency ratio (CR) per respondent, criterion set 2.
cr <- df2 %>%
  ahp.mat(atts2, negconvert = TRUE) %>%   # was negconvert = T
  ahp.cr(atts2)
table(cr <= 0.1)
## FALSE  TRUE
##    13    12

# Retain only respondents with CR <= 0.1.
# (Replaces the bare prose line "0.1 이하", which was a syntax error.)
thres <- 0.1
cr.df2 <- df2 %>%
  ahp.mat(atts2, negconvert = TRUE) %>%
  ahp.cr(atts2) %>%
  data.frame() %>%
  mutate(rowid = seq_len(nrow(df2)),      # was hard-coded 1:25
         cr.dum = as.factor(ifelse(cr <= thres, 1, 0)))
cr.df2 %>%
  filter(cr.dum == 1)
cr1.df2 <- df2 %>%
  mutate(rowid = seq_len(nrow(df2)))
df2n <- inner_join(cr1.df2, cr.df2) %>%
  filter(cr.dum == 1) %>%
  select(1:6)

# Arithmetic-mean aggregated weights of the consistent subsample.
agg_pref2 <- df2n %>%
  ahp.mat(atts = atts2, negconvert = TRUE) %>%
  ahp.aggpref(atts2, method = "arithmetic")
agg_pref2
##     inter   foreign  together    safety
## 0.2373844 0.2220245 0.2424104 0.2981807

# Mean CR of the retained respondents.
cr <- df2n %>%
  ahp.mat(atts2, negconvert = TRUE) %>%
  ahp.cr(atts2)
mean(cr)
## [1] 0.04730747
# Consistency ratio (CR) per respondent, criterion set 3.
cr <- df3 %>%
  ahp.mat(atts3, negconvert = TRUE) %>%   # was negconvert = T
  ahp.cr(atts3)
table(cr <= 0.1)
## FALSE  TRUE
##    16     9

# Retain only respondents with CR <= 0.1.
# (Replaces the bare prose line "0.1 이하", which was a syntax error.)
thres <- 0.1
cr.df3 <- df3 %>%
  ahp.mat(atts3, negconvert = TRUE) %>%
  ahp.cr(atts3) %>%
  data.frame() %>%
  mutate(rowid = seq_len(nrow(df3)),      # was hard-coded 1:25
         cr.dum = as.factor(ifelse(cr <= thres, 1, 0)))
cr.df3 %>%
  filter(cr.dum == 1)
cr1.df3 <- df3 %>%
  mutate(rowid = seq_len(nrow(df3)))
df3n <- inner_join(cr1.df3, cr.df3) %>%
  filter(cr.dum == 1) %>%
  select(1:10)

# Arithmetic-mean aggregated weights of the consistent subsample.
agg_pref3 <- df3n %>%
  ahp.mat(atts = atts3, negconvert = TRUE) %>%
  ahp.aggpref(atts3, method = "arithmetic")
agg_pref3
##  capacity    korean  economic  strategy  location
## 0.2350879 0.1973460 0.2112991 0.1899084 0.1663587

# Mean CR of the retained respondents.
cr <- df3n %>%
  ahp.mat(atts3, negconvert = TRUE) %>%
  ahp.cr(atts3)
mean(cr)
## [1] 0.04860368
# Consistency ratio (CR) per respondent, criterion set 4.
cr <- df4 %>%
  ahp.mat(atts4, negconvert = TRUE) %>%   # was negconvert = T
  ahp.cr(atts4)
table(cr <= 0.1)
## FALSE  TRUE
##    16     9

# Retain only respondents with CR <= 0.1.
# (Replaces the bare prose line "0.1 이하", which was a syntax error.)
thres <- 0.1
cr.df4 <- df4 %>%
  ahp.mat(atts4, negconvert = TRUE) %>%
  ahp.cr(atts4) %>%
  data.frame() %>%
  mutate(rowid = seq_len(nrow(df4)),      # was hard-coded 1:25
         cr.dum = as.factor(ifelse(cr <= thres, 1, 0)))
cr.df4 %>%
  filter(cr.dum == 1)
cr1.df4 <- df4 %>%
  mutate(rowid = seq_len(nrow(df4)))
df4n <- inner_join(cr1.df4, cr.df4) %>%
  filter(cr.dum == 1) %>%
  select(1:6)

# Arithmetic-mean aggregated weights of the consistent subsample.
agg_pref4 <- df4n %>%
  ahp.mat(atts = atts4, negconvert = TRUE) %>%
  ahp.aggpref(atts4, method = "arithmetic")
agg_pref4
## president   officer  assembly     media
## 0.4637191 0.1807063 0.2106244 0.1449501

# Mean CR of the retained respondents.
cr <- df4n %>%
  ahp.mat(atts4, negconvert = TRUE) %>%
  ahp.cr(atts4)
mean(cr)
## [1] 0.05015281
# Attach respondent categories and split the consistent df1 sample by `cat`.
df.cat <- df_cat %>%
  mutate(rowid = seq_len(nrow(df_cat)))   # was hard-coded 1:25
df1n.cat <- inner_join(cr1.df1, cr.df1)
df1ncat1 <- inner_join(df1n.cat, df.cat) %>%
  filter(cr.dum == 1 & cat == 1) %>%
  select(1:3)
df1ncat2 <- inner_join(df1n.cat, df.cat) %>%
  filter(cr.dum == 1 & cat == 2) %>%
  select(1:3)

# Aggregated weights and mean CR, category 1.
# (Renamed from `mean`, which shadowed base::mean.)
agg_pref1_cat1 <- df1ncat1 %>%
  ahp.mat(atts = atts1, negconvert = TRUE) %>%
  ahp.aggpref(atts1, method = "arithmetic")
agg_pref1_cat1
##     inter     inner     actor
## 0.5308942 0.1065402 0.3625657
cr <- df1ncat1 %>%
  ahp.mat(atts1, negconvert = TRUE) %>%   # was negconvert = T
  ahp.cr(atts1)
mean(cr)
## [1] 0.02589972

# Aggregated weights and mean CR, category 2.
agg_pref1_cat2 <- df1ncat2 %>%
  ahp.mat(atts = atts1, negconvert = TRUE) %>%
  ahp.aggpref(atts1, method = "arithmetic")
agg_pref1_cat2
##     inter     inner     actor
## 0.1086337 0.3722284 0.5191379
cr <- df1ncat2 %>%
  ahp.mat(atts = atts1, negconvert = TRUE) %>%
  ahp.cr(atts1)
mean(cr)
## [1] 0.01833273
# Split the consistent df2 sample by respondent category.
df2n.cat <- inner_join(cr1.df2, cr.df2)
df2ncat1 <- inner_join(df2n.cat, df.cat) %>%
  filter(cr.dum == 1 & cat == 1) %>%
  select(1:6)
df2ncat2 <- inner_join(df2n.cat, df.cat) %>%
  filter(cr.dum == 1 & cat == 2) %>%
  select(1:6)

# Aggregated weights and mean CR, category 1.
agg_pref2_cat1 <- df2ncat1 %>%
  ahp.mat(atts = atts2, negconvert = TRUE) %>%
  ahp.aggpref(atts2, method = "arithmetic")
agg_pref2_cat1
##     inter   foreign  together    safety
## 0.1801833 0.1525073 0.3418430 0.3254664
cr <- df2ncat1 %>%
  ahp.mat(atts2, negconvert = TRUE) %>%   # was negconvert = T
  ahp.cr(atts2)
mean(cr)
## [1] 0.06160621

# Aggregated weights and mean CR, category 2.
agg_pref2_cat2 <- df2ncat2 %>%
  ahp.mat(atts = atts2, negconvert = TRUE) %>%
  ahp.aggpref(atts2, method = "arithmetic")
agg_pref2_cat2
##     inter   foreign  together    safety
## 0.2782423 0.2716796 0.1713871 0.2786909
cr <- df2ncat2 %>%
  ahp.mat(atts = atts2, negconvert = TRUE) %>%
  ahp.cr(atts2)
mean(cr)
## [1] 0.03709408
# Split the consistent df3 sample by respondent category.
df3n.cat <- inner_join(cr1.df3, cr.df3)
df3ncat1 <- inner_join(df3n.cat, df.cat) %>%
  filter(cr.dum == 1 & cat == 1) %>%
  select(1:10)
df3ncat2 <- inner_join(df3n.cat, df.cat) %>%
  filter(cr.dum == 1 & cat == 2) %>%
  select(1:10)

# Aggregated weights and mean CR, category 1.
agg_pref3_cat1 <- df3ncat1 %>%
  ahp.mat(atts = atts3, negconvert = TRUE) %>%
  ahp.aggpref(atts3, method = "arithmetic")
agg_pref3_cat1
##  capacity    korean  economic  strategy  location
## 0.2275933 0.2340223 0.2292604 0.1872384 0.1218856
cr <- df3ncat1 %>%
  ahp.mat(atts3, negconvert = TRUE) %>%   # was negconvert = T
  ahp.cr(atts3)
mean(cr)
## [1] 0.0486312

# Aggregated weights and mean CR, category 2.
agg_pref3_cat2 <- df3ncat2 %>%
  ahp.mat(atts = atts3, negconvert = TRUE) %>%
  ahp.aggpref(atts3, method = "arithmetic")
agg_pref3_cat2
##  capacity    korean  economic  strategy  location
## 0.2444560 0.1515006 0.1888475 0.1932459 0.2219500
cr <- df3ncat2 %>%
  ahp.mat(atts = atts3, negconvert = TRUE) %>%
  ahp.cr(atts3)
mean(cr)
## [1] 0.04856929
# Split the consistent df4 sample by respondent category.
df4n.cat <- inner_join(cr1.df4, cr.df4)
df4ncat1 <- inner_join(df4n.cat, df.cat) %>%
  filter(cr.dum == 1 & cat == 1) %>%
  select(1:6)
df4ncat2 <- inner_join(df4n.cat, df.cat) %>%
  filter(cr.dum == 1 & cat == 2) %>%
  select(1:6)

# Aggregated weights and mean CR, category 1.
agg_pref4_cat1 <- df4ncat1 %>%
  ahp.mat(atts = atts4, negconvert = TRUE) %>%
  ahp.aggpref(atts4, method = "arithmetic")
agg_pref4_cat1
## president   officer  assembly     media
## 0.4668560 0.1080815 0.2791554 0.1459071
cr <- df4ncat1 %>%
  ahp.mat(atts4, negconvert = TRUE) %>%   # was negconvert = T
  ahp.cr(atts4)
mean(cr)
## [1] 0.04817232

# Aggregated weights and mean CR, category 2.
agg_pref4_cat2 <- df4ncat2 %>%
  ahp.mat(atts = atts4, negconvert = TRUE) %>%
  ahp.aggpref(atts4, method = "arithmetic")
agg_pref4_cat2
## president   officer  assembly     media
## 0.4597980 0.2714873 0.1249607 0.1437539
cr <- df4ncat2 %>%
  ahp.mat(atts = atts4, negconvert = TRUE) %>%
  ahp.cr(atts4)
mean(cr)
## [1] 0.05262842
# Correlation between the two weight vectors v1 and v2.
dfwt <- readxl::read_excel("wt.xlsx")
head(dfwt)

# Ties in the data prevent exact p-values for the rank correlations;
# `exact = FALSE` requests the asymptotic approximation explicitly,
# giving the same estimates/p-values as before without the tie warning.
cor.test(dfwt$v1, dfwt$v2, method = "spearman", exact = FALSE)
## S = 533.23, p-value = 0.1094, rho = -0.4649248
cor.test(dfwt$v1, dfwt$v2, method = "kendall", exact = FALSE)
## z = -1.6503, p-value = 0.09888, tau = -0.3483943
cor.test(dfwt$v1, dfwt$v2, method = "pearson")
## t = 0.098583, df = 11, p-value = 0.9232, cor = 0.0297107
# Needs-assessment data: q1_1..q1_13 = current (today) importance ratings,
# q2_1..q2_13 = desired future importance ratings, 13 items.
dfnd <- readxl::read_excel("dfnd.xlsx")
head(dfnd)

q1_cols <- paste0("q1_", 1:13)
q2_cols <- paste0("q2_", 1:13)

# Item means and SDs. Replaces ~80 duplicated mean()/sd() calls; the
# original even repeated mean(dfnd$q1_2) and mean(dfnd$q2_2) twice each.
colMeans(dfnd[q1_cols])
## q1: 6.84 5.48 6.52 5.64 5.40 5.72 5.48 4.96 4.76 7.32 5.36 6.16 5.88
colMeans(dfnd[q2_cols])
## q2: 7.60 6.24 6.96 6.36 5.84 6.00 6.60 6.08 4.80 7.48 5.44 6.24 6.80
sapply(dfnd[q1_cols], sd)
sapply(dfnd[q2_cols], sd)

# Gap scores qd1..qd13 = future - current, one column per item
# (same columns the original created with 13 copy-pasted assignments).
for (i in 1:13) {
  dfnd[[paste0("qd", i)]] <- dfnd[[paste0("q2_", i)]] - dfnd[[paste0("q1_", i)]]
}
qd_cols <- paste0("qd", 1:13)

# Gap means and SDs per item.
colMeans(dfnd[qd_cols])
## qd: 0.76 0.76 0.44 0.72 0.44 0.28 1.12 1.12 0.04 0.16 0.08 0.08 0.92
sapply(dfnd[qd_cols], sd)
# Welch two-sample t-tests: current (q1_i) vs. future (q2_i), per item.
# Replaces 13 copy-pasted t.test() calls; print() is needed inside the loop
# because auto-printing only happens at top level.
# NOTE(review): q1 and q2 appear to be the same respondents rated twice;
# a paired test (t.test(..., paired = TRUE)) may be more appropriate —
# confirm before publication. Kept unpaired to reproduce the reported
# results (items 7 and 8 significant at p < .05: p = 0.042, p = 0.028;
# item 13 marginal at p = 0.060).
for (i in 1:13) {
  print(t.test(dfnd[[paste0("q1_", i)]], dfnd[[paste0("q2_", i)]]))
}
# Borich needs-assessment score per item:
#   mean over respondents of (future - current), weighted by the item's
#   mean future importance, i.e. mean((q2 - q1) * mean(q2)).
# Replaces 13 copy-pasted one-liners with a loop over items.
## Reported values: 5.776 4.742 3.062 4.579 2.570 1.680 7.392 6.810
##                  0.192 1.197 0.435 0.499 6.256
for (i in 1:13) {
  cur <- dfnd[[paste0("q1_", i)]]
  fut <- dfnd[[paste0("q2_", i)]]
  cat("item", i, "Borich score:", mean((fut - cur) * mean(fut)), "\n")
}
# Packages for reshaping and plotting the Locus-for-Focus chart.
library(ggplot2)
library(dplyr)
library(data.table)
library(stringr)
library(tidyr)

dim(dfnd)
## [1] 25 40

# Rename the item columns: q1_* -> today1..today13, q2_* -> future1..future13.
colnames(dfnd)[2:14] <- paste0("today", 1:13)
colnames(dfnd)[15:27] <- paste0("future", 1:13)
# Build the Locus-for-Focus input: per-item mean future importance (x axis),
# per-item mean gradient = future - today (y axis), plus the grand means
# used as quadrant cut-off lines.
dfnd_L1 <- dfnd %>%
  dplyr::select(today1:future13)
#dfnd_L1 <- na.omit(dfnd_L1)

# Gradient (change) per respondent and item: future_i - today_i.
dfnd_L1[paste0("gradient", 1:13)] <-
  dfnd_L1[paste0("future", 1:13)] - dfnd_L1[paste0("today", 1:13)]

# Grand mean importance (all future items pooled).
# `funs()` was deprecated in dplyr 0.8.0; use across() instead.
total_mean_importance <- dfnd_L1 %>%
  select(future1:future13) %>%
  summarise(across(everything(), mean))
total_mean_importance <- mean(as.matrix(total_mean_importance), na.rm = TRUE)

# Grand mean gradient (all gradient items pooled).
total_mean_gradient <- dfnd_L1 %>%
  select(gradient1:gradient13) %>%
  summarise(across(everything(), mean))
total_mean_gradient <- mean(as.matrix(total_mean_gradient), na.rm = TRUE)

# Column means per item (importance and gradient).
dfnd_L2_importance <- dfnd_L1 %>%
  dplyr::select(future1:future13) %>%
  dplyr::summarise(across(everything(), ~ mean(.x, na.rm = TRUE)))
dfnd_L2_gradient <- dfnd_L1 %>%
  dplyr::select(gradient1:gradient13) %>%
  dplyr::summarise(across(everything(), ~ mean(.x, na.rm = TRUE)))

# Reshape importance to long form: one row per item.
dfnd_L2_importance_L1 <- as.data.frame(t(dfnd_L2_importance))
rownames_imp <- rownames(dfnd_L2_importance_L1)
dfnd_L2_importance_L1 <- cbind(dfnd_L2_importance_L1, rownames_imp)
colnames(dfnd_L2_importance_L1) <- c("Importance", "row_import")

# Reshape gradient to long form: one row per item.
dfnd_L2_gradient_L1 <- as.data.frame(t(dfnd_L2_gradient))
rownames_grad <- rownames(dfnd_L2_gradient_L1)
dfnd_L2_gradient_L1 <- cbind(dfnd_L2_gradient_L1, rownames_grad)
colnames(dfnd_L2_gradient_L1) <- c("Gradient", "row_grad")

# Final plot data: one row per item with numeric label 1..13.
result_set <- cbind(dfnd_L2_importance_L1, dfnd_L2_gradient_L1) %>%
  dplyr::mutate(data_label = paste0(seq_len(13)))
result_set_write <- result_set %>%
  dplyr::select(Importance, Gradient, data_label)
# NOTE(review): output path has no file extension — confirm "./dfnd.csv"
# was not intended; kept as-is to preserve behavior.
write.csv(result_set_write, "./dfnd", row.names = FALSE)
# Locus-for-Focus quadrant plot: items positioned by mean future importance
# (x) and mean gradient (y); dashed lines mark the grand means.
ggplot() +
  theme_bw() +
  theme(plot.title = element_text(face = "bold", size = 20, color = "black"),
        axis.title.x = element_text(face = "bold", size = 15, colour = "black"),
        axis.title.y = element_text(face = "bold", size = 15, colour = "black",
                                    angle = 0, vjust = 0.5),
        axis.text.x = element_text(face = "bold", size = 15, colour = "black"),
        axis.text.y = element_text(face = "bold", size = 15, colour = "black")) +
  #labs(title = "Locus For Focus Result") +
  labs(title = "") +
  geom_text(data = result_set, aes(x = Importance, y = Gradient, label = data_label)) +
  # `size` for lines was deprecated in ggplot2 3.4.0; `linewidth` is the
  # replacement (this removes the deprecation warning).
  geom_hline(yintercept = total_mean_gradient, linewidth = 1.5, linetype = "dashed") +
  geom_vline(xintercept = total_mean_importance, linewidth = 1.5, linetype = "dashed") +
  xlim(total_mean_importance - 1.8, total_mean_importance + 1.8) +
  ylim(total_mean_gradient - 0.8, total_mean_gradient + 0.8) +
  xlab(paste0("미래중요도 평균값 \n", "(M=", round(total_mean_importance, 2), ")")) +
  ylab(paste0("미래중요도-현재중요도\n 차의 평균값 \n", "(M=", round(total_mean_gradient, 2), ")"))
#labs(title = paste0("WRF 300m ",daychr," night(04-05) mean WDIR and WS" ) ) +
# Attach the accompanying "rest_dfnd.png" file to the paper.
# ==> It can be assembled from the table above.