Documentos de Académico
Documentos de Profesional
Documentos de Cultura
Castillo Benites, Cayllahua Ríos, Nicolás Olascuaga, Zamudio Limas y Zavaleta Cotrina
2023-01-09
# Setup: install (only if missing) and load the packages used in this report.
# The original ran install.packages() unconditionally on every knit and
# installed "readxl" twice; install only the packages that are absent.
pkgs <- c("readxl", "tseries", "astsa", "forecast",
          "tidyverse", "lubridate", "foreign", "quantmod")
missing_pkgs <- setdiff(pkgs, rownames(installed.packages()))
if (length(missing_pkgs) > 0) {
  install.packages(missing_pkgs)
}
library(readxl)
library(tseries)
library(astsa)
library(forecast)
library(tidyverse)
library(lubridate)
library(foreign)
library(quantmod)
#EJERCICIO 1
#Cargamos la base de datos
## # A tibble: 12 x 2
## Año ‘Venta de autos‘
## <dttm> <dbl>
## 1 2022-01-01 00:00:00 20
## 2 2022-02-01 00:00:00 21
## 3 2022-03-01 00:00:00 15
## 4 2022-04-01 00:00:00 14
## 5 2022-05-01 00:00:00 13
## 6 2022-06-01 00:00:00 16
## 7 2022-07-01 00:00:00 17
## 8 2022-08-01 00:00:00 18
## 9 2022-09-01 00:00:00 20
## 10 2022-10-01 00:00:00 20
## 11 2022-11-01 00:00:00 21
## 12 2022-12-01 00:00:00 23
## Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec
## 2022 20 21 15 14 13 16 17 18 20 20 21 23
# Plot the monthly car-sales time series.
plot(venta.ts)
2
22
20
Venta de autos
18
16
14
Time
##
## Augmented Dickey-Fuller Test
##
## data: venta.ts
## Dickey-Fuller = -3.9126, Lag order = 2, p-value = 0.02767
## alternative hypothesis: stationary
#Dado que el valor p-value= 0.02 es menor que 0.05 el modelo es estacionario.
3
Venta de autos
1.0
0.5
ACF
0.0
−0.5
Lag
4
Series venta.ts
0.6
0.4
0.2
Partial ACF
−0.2 0.0
−0.6
Lag
5
Venta de autos
1.0
0.5
ACF
0.0
−0.5
0 2 4 6 8 10
Lag
pacf(ts(venta.ts,frequency=1))
6
Series ts(venta.ts, frequency = 1)
0.6
0.4
0.2
Partial ACF
−0.2 0.0
−0.6
2 4 6 8 10
Lag
##
## Call:
## arima(x = venta.ts, order = c(1, 2, 2))
##
## Coefficients:
## ar1 ma1 ma2
## -0.8208 0.0892 -0.2046
## s.e. 0.3246 0.4717 0.4816
##
## sigma^2 estimated as 5.98: log likelihood = -23.5, aic = 55.01
tsdiag(modelo1)
7
−2.0
Standardized Residuals
Time
ACF of Residuals
ACF
−0.5
Lag
0.0
2 4 6 8 10
lag
##
## Box-Ljung test
##
## data: residuals(modelo1)
## X-squared = 0.67132, df = 1, p-value = 0.4126
# Extract the fitted model's residuals.
error=residuals(modelo1)
# Plot the residuals over time to check for remaining structure.
plot(error)
8
4
2
0
error
−2
−4
Time
## Point Forecast Lo 95 Hi 95
## Jan 2023 24.35962 19.566826 29.15241
## Feb 2023 25.93181 18.190332 33.67329
## Mar 2023 27.32953 15.257545 39.40152
## Apr 2023 28.87045 12.612638 45.12827
## May 2023 30.29384 8.972661 51.61502
#Graficamos
plot(pronostico)
9
Forecasts from ARIMA(1,2,2)
50
40
30
20
10
10
venta de autos
50
40
ventas
30
20
10
mes
#EJERCICIO 2
#Cargamos la base de datos
## # A tibble: 11 x 2
## Años DEMANDA
## <dbl> <dbl>
## 1 1 7
## 2 2 9
## 3 3 5
## 4 4 9
## 5 5 13
## 6 6 8
## 7 7 12
## 8 8 13
## 9 9 9
## 10 10 11
## 11 11 7
## Time Series:
11
## Start = 1
## End = 11
## Frequency = 1
## DEMANDA
## [1,] 7
## [2,] 9
## [3,] 5
## [4,] 9
## [5,] 13
## [6,] 8
## [7,] 12
## [8,] 13
## [9,] 9
## [10,] 11
## [11,] 7
#Graficamos la serie
plot(Demanda.ts)
12
DEMANDA
10
8
6
2 4 6 8 10
Time
12
##
## Augmented Dickey-Fuller Test
##
## data: Demanda.ts
## Dickey-Fuller = 1.4041, Lag order = 2, p-value = 0.99
## alternative hypothesis: stationary
# Since the ADF p-value (0.99) is greater than 0.05, the series is not
# stationary, so we take the first difference.
Demanda1=diff(Demanda.ts)
Demanda1
## Time Series:
## Start = 2
## End = 11
## Frequency = 1
## DEMANDA
## [1,] 2
## [2,] -4
## [3,] 4
## [4,] 4
## [5,] -5
## [6,] 4
## [7,] 1
## [8,] -4
## [9,] 2
## [10,] -4
plot(Demanda1)
13
4
2
DEMANDA
0
−2
−4
2 4 6 8 10
Time
adf.test(Demanda1, alternative="stationary")
##
## Augmented Dickey-Fuller Test
##
## data: Demanda1
## Dickey-Fuller = -1.7734, Lag order = 2, p-value = 0.6587
## alternative hypothesis: stationary
# Second difference of the already once-differenced series (i.e. three
# total differences of the original data).
# NOTE(review): the ARIMA model fitted below uses d = 2, which corresponds
# to diff(Demanda1) — one additional difference — not
# diff(Demanda1, differences = 2); confirm which differencing order was
# intended.
demandas2=diff(Demanda1, differences=2)
demandas2
## Time Series:
## Start = 4
## End = 11
## Frequency = 1
## DEMANDA
## [1,] 14
## [2,] -8
## [3,] -9
## [4,] 18
## [5,] -12
## [6,] -2
## [7,] 11
## [8,] -12
14
plot(demandas2)
15
10
DEMANDA
5
0
−10 −5
4 5 6 7 8 9 10 11
Time
##
## Augmented Dickey-Fuller Test
##
## data: demandas2
## Dickey-Fuller = -11.925, Lag order = 1, p-value = 0.01
## alternative hypothesis: stationary
# Since the ADF p-value is below 0.05, the series is stationary.
# ACF to choose the moving-average (MA) order of the ARIMA model.
acf(demandas2)
15
DEMANDA
1.0
0.5
ACF
0.0
−0.5
0 1 2 3 4 5 6 7
Lag
16
Series demandas2
0.6
0.2
Partial ACF
−0.2
−0.6
1 2 3 4 5 6 7
Lag
17
DEMANDA
1.0
0.5
ACF
0.0
−0.5
0 1 2 3 4 5 6 7
Lag
pacf(ts(demandas2,frequency=1))
18
Series ts(demandas2, frequency = 1)
0.6
0.2
Partial ACF
−0.2
−0.6
1 2 3 4 5 6 7
Lag
##
## Call:
## arima(x = Demanda.ts, order = c(1, 2, 1))
##
## Coefficients:
## ar1 ma1
## -0.4668 -1.0000
## s.e. 0.2921 0.6135
##
## sigma^2 estimated as 10.46: log likelihood = -24.96, aic = 55.92
tsdiag(modelo1)
19
−1.0
Standardized Residuals
2 4 6 8 10
Time
ACF of Residuals
ACF
−0.5
0 2 4 6 8 10
Lag
0.0
2 4 6 8 10
lag
##
## Box-Ljung test
##
## data: residuals(modelo1)
## X-squared = 1.038, df = 1, p-value = 0.3083
error=residuals(modelo1)
#Graficamos el error
plot(error)
20
4
2
error
0
−2
−4
2 4 6 8 10
Time
## Point Forecast Lo 95 Hi 95
## 12 8.966881 2.2985721 15.63519
## 13 8.148460 0.2945349 16.00239
## 14 8.630201 -1.2009881 18.46139
## 15 8.505034 -2.8145798 19.82465
## 16 8.663168 -4.2701752 21.59651
#Graficamos
plot(pronostico)
21
Forecasts from ARIMA(1,2,1)
20
15
10
5
0
−5
5 10 15
22
Pronóstico de la demanda
20
15
Demanda
10
5
0
−5
5 10 15
año
#EJERCICIO 3
#Cargamos la base de datos
## # A tibble: 24 x 2
## Trimestre ‘Venta en miles de litros‘
## <dttm> <dbl>
## 1 2022-01-01 00:00:00 258
## 2 2022-04-01 00:00:00 370
## 3 2022-07-01 00:00:00 392
## 4 2022-10-01 00:00:00 303
## 5 2023-01-01 00:00:00 265
## 6 2023-04-01 00:00:00 382
## 7 2023-07-01 00:00:00 401
## 8 2023-10-01 00:00:00 312
## 9 2024-01-01 00:00:00 292
## 10 2024-04-01 00:00:00 387
## # ... with 14 more rows
23
## 2022 258 370 392 303
## 2023 265 382 401 312
## 2024 292 387 424 325
## 2025 299 412 448 329
## 2026 304 419 463 351
## 2027 310 438 486 367
#Graficamos la serie
plot(Litros.ts)
450
Venta en miles de litros
400
350
300
250
Time
##
## Augmented Dickey-Fuller Test
##
## data: Litros.ts
## Dickey-Fuller = -7.8548, Lag order = 2, p-value = 0.01
## alternative hypothesis: stationary
24
#Dado que el valor p-value= 0.01 es menor que 0.05 el modelo es estacionario.
0.0
−0.5
Lag
25
Series Litros.ts
0.6
0.4
0.2
Partial ACF
−0.2
−0.6
Lag
26
Venta en miles de litros
1.0
0.5
ACF
0.0
−0.5
0 2 4 6 8 10 12
Lag
pacf(ts(Litros.ts,frequency=1))
27
Series ts(Litros.ts, frequency = 1)
0.6
0.4
0.2
Partial ACF
−0.2
−0.6
2 4 6 8 10 12
Lag
##
## Call:
## arima(x = Litros.ts, order = c(3, 2, 6))
##
## Coefficients:
## ar1 ar2 ar3 ma1 ma2 ma3 ma4 ma5 ma6
## -0.8974 -0.9822 -0.9132 -1.5450 -0.1011 0.4950 -0.1536 1.0454 -0.7402
## s.e. 0.1628 0.0131 0.1615 2.1758 1.3130 1.5162 0.6069 0.8016 1.6263
##
## sigma^2 estimated as 24.48: log likelihood = -80.72, aic = 181.45
tsdiag(modelo1)
28
−1.5
Standardized Residuals
Time
ACF of Residuals
ACF
−0.4
Lag
0.0
2 4 6 8 10
lag
##
## Box-Ljung test
##
## data: residuals(modelo1)
## X-squared = 0.0057584, df = 1, p-value = 0.9395
error=residuals(modelo1)
#Graficamos el error
plot(error)
29
5
error
0
−5
Time
## Point Forecast Lo 95 Hi 95
## 2028 Q1 320.5803 309.4697 331.6909
## 2028 Q2 442.7778 431.4601 454.0956
## 2028 Q3 501.5733 488.5049 514.6416
## 2028 Q4 385.9994 372.8009 399.1978
## 2029 Q1 333.4197 318.7532 348.0861
## 2029 Q2 453.6359 438.2861 468.9857
## 2029 Q3 516.1443 499.6613 532.6273
## 2029 Q4 403.2026 386.5847 419.8204
#Graficamos
plot(pronostico)
30
Forecasts from ARIMA(3,2,6)
500
450
400
350
300
250
31
venta en miles de litros por trimestre
500
ventas miles de litros
450
400
350
300
250
trimestre
#EJERCICIO 4
#Cargamos la base de datos
## # A tibble: 24 x 2
## Trimestre Demanda
## <dttm> <dbl>
## 1 2001-01-01 00:00:00 50
## 2 2001-04-01 00:00:00 45
## 3 2001-07-01 00:00:00 52
## 4 2001-10-01 00:00:00 56
## 5 2002-01-01 00:00:00 53
## 6 2002-04-01 00:00:00 48
## 7 2002-07-01 00:00:00 57
## 8 2002-10-01 00:00:00 62
## 9 2003-01-01 00:00:00 56
## 10 2003-04-01 00:00:00 50
## # ... with 14 more rows
32
## 1 50 45 52 56
## 2 53 48 57 62
## 3 56 50 60 67
## 4 62 56 65 71
## 5 65 60 70 77
## 6 73 66 75 85
#Graficamos la serie
plot(De.ts)
80
70
Demanda
60
50
1 2 3 4 5 6
Time
##
## Augmented Dickey-Fuller Test
##
## data: De.ts
## Dickey-Fuller = -3.2076, Lag order = 2, p-value = 0.1123
## alternative hypothesis: stationary
# Since the ADF p-value (0.1123) is greater than 0.05, the series is not
# stationary, so we take the first difference.
Demanda1=diff(De.ts)
Demanda1
33
## 1 -5 7 4
## 2 -3 -5 9 5
## 3 -6 -6 10 7
## 4 -5 -6 9 6
## 5 -6 -5 10 7
## 6 -4 -7 9 10
plot(Demanda1)
10
5
Demanda
0
−5
2 3 4 5 6
Time
##
## Augmented Dickey-Fuller Test
##
## data: Demanda1
## Dickey-Fuller = -13.069, Lag order = 2, p-value = 0.01
## alternative hypothesis: stationary
#Dado que tiene un valor menor a 0.05 se concluye que el modelo es estacionario.
#Ahora determinaremos el número de media móviles para el modelo arima.
acf(Demanda1)
34
Demanda
1.0
0.5
ACF
0.0
−0.5
Lag
35
Series Demanda1
0.0 0.2 0.4
Partial ACF
−0.4
−0.8
Lag
36
Demanda
1.0
0.5
ACF
0.0
−0.5
0 2 4 6 8 10 12
Lag
pacf(ts(Demanda1,frequency=1))
37
Series ts(Demanda1, frequency = 1)
0.0 0.2 0.4
Partial ACF
−0.4
−0.8
2 4 6 8 10 12
Lag
##
## Call:
## arima(x = De.ts, order = c(2, 2, 4))
##
## Coefficients:
## ar1 ar2 ma1 ma2 ma3 ma4
## 0.0046 -0.9973 -1.5799 1.4249 -1.5799 1.0000
## s.e. 0.0180 0.0035 0.3036 0.3408 0.4579 0.3717
##
## sigma^2 estimated as 1.374: log likelihood = -43.34, aic = 100.69
tsdiag(modelo1)
38
−1.5
Standardized Residuals
1 2 3 4 5 6
Time
ACF of Residuals
ACF
−0.4
Lag
0.0
2 4 6 8 10
lag
##
## Box-Ljung test
##
## data: residuals(modelo1)
## X-squared = 1.5414, df = 1, p-value = 0.2144
error=residuals(modelo1)
#Graficamos el error
plot(error)
39
2
1
error
0
−1
1 2 3 4 5 6
Time
## Point Forecast Lo 95 Hi 95
## 7 Q1 78.54114 76.04850 81.03378
## 7 Q2 72.90181 70.12383 75.67980
## 7 Q3 81.21891 78.34879 84.08903
## 7 Q4 90.50915 87.62220 93.39611
## 8 Q1 85.88552 82.61137 89.15967
## 8 Q2 80.22771 76.13581 84.31961
## 8 Q3 88.44106 83.90694 92.97518
## 8 Q4 97.74926 92.93145 102.56707
#Graficamos
plot(pronostico)
40
Forecasts from ARIMA(2,2,4)
100
90
80
70
60
50
2 4 6 8
41
Pronóstico de la demanda
100
90
Demanda
80
70
60
50
2 4 6 8
trimestre
# EXERCISE 5
# Load the workbook sheet into a data frame named "Co" so its variables
# can be used below.
Co = read_excel("práctica de pronóstico - excel.xlsx", sheet= "Base de datos - Ejercicio 5")
Co
## # A tibble: 10 x 3
## AÑO ‘Cosecha (miles de bushells)‘ ‘Lluvia (pulgadas)‘
## <dbl> <dbl> <dbl>
## 1 1984 11 6.5
## 2 1985 12 7.2
## 3 1986 10.5 7.5
## 4 1987 11.5 9
## 5 1988 12.5 8.3
## 6 1989 7.5 2.2
## 7 1990 9.5 5
## 8 1991 11 6.6
## 9 1992 11 7.5
## 10 1993 12 8.1
# Attach "Co" so its columns can be referenced by bare name below.
# NOTE(review): attach() is discouraged — it can mask other objects and
# makes the data flow implicit; prefer with(Co, ...) or Co$column. Kept
# here because later expressions rely on the attached names.
attach(Co)
42
# Scatter-plot matrix of the three columns to inspect their pairwise
# relationships.
pairs(Co[,1:3])
8 9 10 11 12
1992
1988
AÑO
1984
12
8
Lluvia (pulgadas)
6
4
2
1984 1986 1988 1990 1992 2 3 4 5 6 7 8 9
## # A tibble: 10 x 3
## AÑO ‘Cosecha (miles de bushells)‘ ‘Lluvia (pulgadas)‘
## <dbl> <dbl> <dbl>
## 1 1984 11 6.5
## 2 1985 12 7.2
## 3 1986 10.5 7.5
## 4 1987 11.5 9
## 5 1988 12.5 8.3
## 6 1989 7.5 2.2
## 7 1990 9.5 5
## 8 1991 11 6.6
## 9 1992 11 7.5
## 10 1993 12 8.1
## [1] 0.9173603
43
# Fit the simple linear regression of crop yield (thousands of bushels)
# on rainfall (inches).
mylm <- lm(`Cosecha (miles de bushells)` ~ `Lluvia (pulgadas)`)
# Scatter plot of the data with the fitted regression line overlaid.
# (The original plot() call was truncated in the document — the ylab
# string was cut off and xlab lacked its closing parenthesis; restored.)
plot(`Lluvia (pulgadas)`, `Cosecha (miles de bushells)`,
     xlab = "lluvia (pulgadas)", ylab = "cosecha (miles de bushells)")
abline(mylm, col = "blue")
11
10
9
8
2 3 4 5 6 7 8 9
lluvia (pulgadas
summary(mylm)
##
## Call:
## lm(formula = ‘Cosecha (miles de bushells)‘ ~ ‘Lluvia (pulgadas)‘)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.85663 -0.30573 0.06362 0.33066 0.87049
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 6.2210 0.7362 8.450 2.94e-05 ***
## ‘Lluvia (pulgadas)‘ 0.6817 0.1046 6.518 0.000185 ***
## ---
## Signif. codes: 0 ’***’ 0.001 ’**’ 0.01 ’*’ 0.05 ’.’ 0.1 ’ ’ 1
##
## Residual standard error: 0.6139 on 8 degrees of freedom
## Multiple R-squared: 0.8415, Adjusted R-squared: 0.8217
## F-statistic: 42.49 on 1 and 8 DF, p-value: 0.0001845
44
# Predict the crop yield (thousands of bushels) for 10 inches of rainfall.
# check.names = FALSE preserves the column name "Lluvia (pulgadas)":
# without it, data.frame() renames the column to a syntactic name, so
# predict() cannot match the model term and silently falls back to the
# 10 in-sample fitted values (exactly what the original output shows)
# instead of returning the single requested prediction.
predict(mylm, newdata = data.frame(`Lluvia (pulgadas)` = 10, check.names = FALSE))
## 1 2 3 4 5 6 7 8
## 10.652298 11.129510 11.334030 12.356629 11.879416 7.720848 9.629699 10.720471
## 9 10
## 11.334030 11.743069
boxplot(`Lluvia (pulgadas)`)
9
8
7
6
5
4
3
2
45
## 6 7.720848 5.868931 9.572765
## 7 9.629699 8.083570 11.175828
## 8 10.720471 9.235127 12.205814
## 9 11.334030 9.839551 12.828509
## 10 11.743069 10.225188 13.260951
#EJERCICIO 6
#Cargamos la base de datos
## # A tibble: 7 x 2
## t Yt
## <dbl> <dbl>
## 1 1 120
## 2 2 110
## 3 3 100
## 4 4 96
## 5 5 94
## 6 6 92
## 7 7 88
## Time Series:
## Start = 1
## End = 7
## Frequency = 1
## Yt
## [1,] 120
46
## [2,] 110
## [3,] 100
## [4,] 96
## [5,] 94
## [6,] 92
## [7,] 88
#Graficamos la serie
plot(y1.ts)
120
110
Yt
95 100
90
1 2 3 4 5 6 7
Time
##
## Augmented Dickey-Fuller Test
##
## data: y1.ts
## Dickey-Fuller = -8.3528, Lag order = 1, p-value = 0.01
## alternative hypothesis: stationary
47
#Dado que el valor p-value= 0.01 es menor que 0.05 el modelo es estacionario.
Yt
1.0
0.5
ACF
0.0
−0.5
0 1 2 3 4 5 6
Lag
48
Series y1.ts
0.5
Partial ACF
0.0
−0.5
1 2 3 4 5 6
Lag
49
Yt
1.0
0.5
ACF
0.0
−0.5
0 1 2 3 4 5 6
Lag
pacf(ts(y1.ts,frequency=1))
50
Series ts(y1.ts, frequency = 1)
0.5
Partial ACF
0.0
−0.5
1 2 3 4 5 6
Lag
##
## Call:
## arima(x = y1.ts, order = c(1, 2, 4))
##
## Coefficients:
## ar1 ma1 ma2 ma3 ma4
## -0.2669 0.5790 -0.0404 -1.0391 -0.4980
## s.e. 4.4526 4.0995 3.5463 3.2825 6.1623
##
## sigma^2 estimated as 3.721: log likelihood = -11.83, aic = 35.66
tsdiag(modelo1)
51
0.0
Standardized Residuals
1 2 3 4 5 6 7
Time
ACF of Residuals
ACF
−0.5
0 1 2 3 4 5 6
Lag
0.0
2 4 6 8 10
lag
##
## Box-Ljung test
##
## data: residuals(modelo1)
## X-squared = 0.80261, df = 1, p-value = 0.3703
error=residuals(modelo1)
#Graficamos el error
plot(error)
52
4
3
error
2
1
0
1 2 3 4 5 6 7
Time
## Point Forecast Lo 95 Hi 95
## 8 83.63599 79.10573 88.16624
## 9 79.03509 67.81861 90.25157
## 10 73.73890 54.57154 92.90626
## 11 68.35220 42.36673 94.33767
## 12 62.98966 30.99252 94.98680
#Graficamos
plot(pronostico)
53
Forecasts from ARIMA(1,2,4)
120
100
80
60
40
2 4 6 8 10 12
54
Pronóstico Yt
120
100
80
Yt
60
40
2 4 6 8 10 12
#EJERCICIO 7
#Cargamos la base de datos
## # A tibble: 7 x 2
## tiempo Yt
## <dbl> <dbl>
## 1 1 82
## 2 2 60
## 3 3 44
## 4 4 35
## 5 5 30
## 6 6 29
## 7 7 35
## Time Series:
## Start = 1
## End = 7
## Frequency = 1
55
## Yt
## [1,] 82
## [2,] 60
## [3,] 44
## [4,] 35
## [5,] 30
## [6,] 29
## [7,] 35
#Graficamos la serie
plot(R1.ts)
80
70
60
Yt
50
40
30
1 2 3 4 5 6 7
Time
##
## Augmented Dickey-Fuller Test
##
## data: R1.ts
## Dickey-Fuller = -0.54007, Lag order = 1, p-value = 0.9717
## alternative hypothesis: stationary
56
Yt
1.0
0.5
ACF
0.0
−0.5
0 1 2 3 4 5 6
Lag
57
Series R1.ts
0.5
Partial ACF
0.0
−0.5
1 2 3 4 5 6
Lag
58
Yt
1.0
0.5
ACF
0.0
−0.5
0 1 2 3 4 5 6
Lag
pacf(ts(R1.ts,frequency=1))
59
Series ts(R1.ts, frequency = 1)
0.5
Partial ACF
0.0
−0.5
1 2 3 4 5 6
Lag
##
## Call:
## arima(x = R1.ts, order = c(0, 2, 1))
##
## Coefficients:
## ma1
## 1.0000
## s.e. 0.5295
##
## sigma^2 estimated as 10.97: log likelihood = -13.98, aic = 31.96
tsdiag(modelo1)
60
0.0
Standardized Residuals
1 2 3 4 5 6 7
Time
ACF of Residuals
ACF
−0.5
0 1 2 3 4 5 6
Lag
0.0
2 4 6 8 10
lag
##
## Box-Ljung test
##
## data: residuals(modelo1)
## X-squared = 0.078731, df = 1, p-value = 0.779
error=residuals(modelo1)
#Graficamos el error
plot(error)
61
4
3
error
2
1
0
1 2 3 4 5 6 7
Time
## Point Forecast Lo 95 Hi 95
## 8 44.83339 37.8226363 51.84415
## 9 54.66679 33.4682482 75.86533
## 10 64.50018 25.2864637 103.71390
## 11 74.33358 13.9084391 134.75872
## 12 84.16697 -0.2537389 168.58768
#Graficamos
plot(pronostico)
62
Forecasts from ARIMA(0,2,1)
150
100
50
0
2 4 6 8 10 12
63
Pronóstico Yt
150
100
Yt
50
0
2 4 6 8 10 12
#EJERCICIO 8
#Cargamos la base de datos
## # A tibble: 8 x 2
## AÑO ‘Costo Unitario ($)‘
## <dbl> <dbl>
## 1 1 20
## 2 2 24.5
## 3 3 28.2
## 4 4 27.5
## 5 5 26.6
## 6 6 30
## 7 7 31
## 8 8 36
## Time Series:
## Start = 1
## End = 8
64
## Frequency = 1
## Costo Unitario ($)
## [1,] 20.0
## [2,] 24.5
## [3,] 28.2
## [4,] 27.5
## [5,] 26.6
## [6,] 30.0
## [7,] 31.0
## [8,] 36.0
#Graficamos la serie
plot(Costo.ts)
35
Costo Unitario ($)
30
25
20
1 2 3 4 5 6 7 8
Time
adf.test(Costo.ts,alternative = "stationary")
##
## Augmented Dickey-Fuller Test
##
## data: Costo.ts
## Dickey-Fuller = -1.5584, Lag order = 1, p-value = 0.7406
## alternative hypothesis: stationary
65
# ACF to choose the moving-average (MA) order of the ARIMA model.
acf(Costo.ts)
0.0
−0.5
0 1 2 3 4 5 6 7
Lag
66
Series Costo.ts
0.6
0.2
Partial ACF
−0.2
−0.6
1 2 3 4 5 6 7
Lag
67
Costo Unitario ($)
1.0
0.5
ACF
0.0
−0.5
0 1 2 3 4 5 6 7
Lag
pacf(ts(Costo.ts,frequency=1))
68
Series ts(Costo.ts, frequency = 1)
0.6
0.2
Partial ACF
−0.2
−0.6
1 2 3 4 5 6 7
Lag
##
## Call:
## arima(x = Costo.ts, order = c(0, 2, 3))
##
## Coefficients:
## ma1 ma2 ma3
## -0.7398 -0.1544 -0.1058
## s.e. 0.7067 0.7437 0.8437
##
## sigma^2 estimated as 6.274: log likelihood = -14.77, aic = 37.54
tsdiag(modelo1)
69
−1.5
Standardized Residuals
1 2 3 4 5 6 7 8
Time
ACF of Residuals
ACF
−0.5
0 1 2 3 4 5 6 7
Lag
0.0
2 4 6 8 10
lag
##
## Box-Ljung test
##
## data: residuals(modelo1)
## X-squared = 0.11878, df = 1, p-value = 0.7304
error=residuals(modelo1)
#Graficamos el error
plot(error)
70
3
2
1
−4 −3 −2 −1 0
error
1 2 3 4 5 6 7 8
Time
## Point Forecast Lo 95 Hi 95
## 9 39.04712 33.82825 44.26599
## 10 41.85812 33.00452 50.71173
## 11 44.38028 32.19195 56.56862
## 12 46.90245 31.72287 62.08203
## 13 49.42461 31.42521 67.42401
#Graficamos
plot(pronostico)
71
Forecasts from ARIMA(0,2,3)
60
50
40
30
20
2 4 6 8 10 12
72
Pronóstico (Costo Unitario $)
60
Costo unitario $
50
40
30
20
2 4 6 8 10 12
Año
73