
Some discrete distributions

Bernoulli distribution

Let X: (\Omega, \sigma, P) \to J = \{0, 1\}.

X is said to have a Bernoulli distribution if it has only two possible outcomes, success and failure, and its probability mass function (p.m.f.) is

f(x) = \begin{cases} p & \text{if } x = 1 \\ 1 - p & \text{if } x = 0 \end{cases}

The probability of success is p and the probability of failure is 1 - p.

In this case we write X ~ Bernoulli(p): "X is distributed Bernoulli with parameter p".
Important notes:

E(X) = p, since

E(X) = \sum_{x} x f(x) = (0)(1 - p) + (1)p = p

On the other hand,

V(X) = E(X^2) - E(X)^2

where

E(X^2) = \sum_{x} x^2 f(x) = (0)^2 (1 - p) + (1)^2 p = p

so

V(X) = p - p^2 = p(1 - p)

Therefore the variance is

V(X) = pq, \quad \text{with } q = 1 - p
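A minimal numeric check of these two results, using scipy.stats.bernoulli (the value p = 0.3 is an arbitrary choice for illustration):

```python
# Check E(X) = p and V(X) = p(1 - p) for a Bernoulli variable.
from scipy.stats import bernoulli

p = 0.3  # arbitrary illustrative parameter
assert abs(bernoulli.mean(p) - p) < 1e-12
assert abs(bernoulli.var(p) - p * (1 - p)) < 1e-12

# The same moments computed directly from the p.m.f. f(0) = 1 - p, f(1) = p:
EX  = 0 * (1 - p) + 1 * p          # = p
EX2 = 0**2 * (1 - p) + 1**2 * p    # = p
print(EX, EX2 - EX**2)             # 0.3 and 0.21 = pq
```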
The moment generating function is defined as

m_X(t) = E(e^{tX}) = \sum_{x} e^{xt} f(x) = e^{(0)t}(1 - p) + e^{(1)t} p = 1 - p + pe^t

Therefore

m_X(t) = 1 - p + pe^t

Properties of the moment generating function:

E(X^n) = m_X^{(n)}(0) = \frac{d^n m_X}{dt^n}(0)
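A symbolic sketch of this property for the Bernoulli m.g.f., using sympy (purely illustrative):

```python
# Verify E(X^n) = m_X^(n)(0) for m_X(t) = 1 - p + p e^t.
import sympy as sp

t, p = sp.symbols('t p')
m = 1 - p + p * sp.exp(t)

# For a {0,1}-valued variable every moment is E(X^n) = 0^n(1-p) + 1^n p = p,
# and indeed each derivative of m evaluated at t = 0 reduces to p.
for n in range(1, 4):
    print(n, sp.simplify(sp.diff(m, t, n).subs(t, 0)))   # prints p each time
```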

The characteristic function of a distribution is defined as

\phi_X(t) = m_X(it) = 1 - p + pe^{it}

Note:

\phi_X^{(n)}(0) = i^n E(X^n)
The probability generating function is defined as

G_X(t) = E(t^X)

For the Bernoulli(p) distribution,

G_X(t) = E(t^X) = \sum_{x} t^x f(x) = (t)^0 (1 - p) + (t)^1 p = 1 - p + pt

Property in the discrete case:

P(X = k) = \frac{1}{k!} \frac{d^k G_X}{dt^k}(0)
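A short sympy sketch of this coefficient-extraction property applied to the Bernoulli p.g.f. G(t) = 1 - p + pt (illustrative only):

```python
# Recover the Bernoulli masses from derivatives of the p.g.f. at t = 0.
import sympy as sp
from math import factorial

t, p = sp.symbols('t p')
G = 1 - p + p * t

for k in range(3):
    prob = sp.diff(G, t, k).subs(t, 0) / factorial(k)
    print(k, sp.simplify(prob))   # 1 - p, p, 0: exactly P(X = k)
```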
Binomial distribution

Suppose we have an experiment that consists of repeating the same Bernoulli experiment n times, where in each repetition the probability of success p does not change and the repetitions are independent of one another.

In this case

X: (\Omega, \sigma, P) \to J = \{0, 1, 2, 3, \ldots, n\}

where J is the set of possible numbers of successes in the experiment. That is, when X denotes the number of successes obtained in an experiment repeated n times, we say that X follows a binomial distribution:

X ~ Binomial(n, p)

What is the probability mass function? We need f(x) = P(X = x). Of the n trials we need exactly x successes, which occurs with probability

\binom{n}{x} p^x (1 - p)^{n - x}

That is, if X ~ Binomial(n, p), then the p.m.f. is

f(x) = \binom{n}{x} p^x (1 - p)^{n - x}, \quad x = 0, 1, 2, \ldots, n
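A quick check that this formula agrees with scipy's built-in binomial p.m.f. (n = 10, p = 0.4 are arbitrary illustrative values):

```python
# Compare the closed-form p.m.f. against scipy.stats.binom.pmf.
from math import comb
from scipy.stats import binom

n, p = 10, 0.4  # arbitrary illustrative parameters
for x in range(n + 1):
    by_hand = comb(n, x) * p**x * (1 - p)**(n - x)
    assert abs(binom.pmf(x, n, p) - by_hand) < 1e-12

# Sanity check: the masses sum to 1.
print(sum(binom.pmf(x, n, p) for x in range(n + 1)))   # 1.0 up to rounding
```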
Let us show that E(X) = np and V(X) = npq.

E(X) = \sum_{x=0}^{n} x f(x) = \sum_{x=0}^{n} x \binom{n}{x} p^x (1 - p)^{n - x}

= \sum_{x=0}^{n} x \frac{n!}{(n - x)!\, x!} p^x (1 - p)^{n - x} = \sum_{x=1}^{n} \frac{n!}{(n - x)!\,(x - 1)!} p^x (1 - p)^{n - x}

Let u = x - 1:

\sum_{x=1}^{n} \frac{n!}{(n - x)!\,(x - 1)!} p^x (1 - p)^{n - x} = \sum_{u=0}^{n-1} \frac{n!}{(n - u - 1)!\, u!} p^{u+1} (1 - p)^{n - u - 1}

= np \sum_{u=0}^{n-1} \frac{(n - 1)!}{(n - 1 - u)!\, u!} p^u (1 - p)^{n - 1 - u} = np(1) = np

After the change of variable u = x - 1, with X ~ Binomial(n, p), the remaining sum is the total probability of a variable U ~ Binomial(n - 1, p), so

\sum_{u=0}^{n-1} \frac{(n - 1)!}{(n - 1 - u)!\, u!} p^u (1 - p)^{n - 1 - u} = \sum_{u=0}^{n-1} \binom{n - 1}{u} p^u (1 - p)^{n - 1 - u} = 1
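A direct numeric confirmation of E(X) = np from the p.m.f. (n = 12, p = 0.35 are arbitrary illustrative values):

```python
# E(X) = sum_x x f(x) computed term by term equals np.
from math import comb

n, p = 12, 0.35  # arbitrary illustrative parameters
EX = sum(x * comb(n, x) * p**x * (1 - p)**(n - x) for x in range(n + 1))
print(EX, n * p)   # both 4.2 up to floating-point rounding
```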

Now for the variance,

V(X) = E(X^2) - E(X)^2

It remains to find E(X^2):

E(X^2) = \sum_{x=0}^{n} x^2 \binom{n}{x} p^x (1 - p)^{n - x} = \sum_{x=1}^{n} x \frac{n!}{(n - x)!\,(x - 1)!} p^x (1 - p)^{n - x}

Let u = x - 1:

\sum_{x=1}^{n} x \frac{n!}{(n - x)!\,(x - 1)!} p^x (1 - p)^{n - x} = \sum_{u=0}^{n-1} (u + 1) \frac{n!}{(n - 1 - u)!\, u!} p^{u+1} (1 - p)^{n - 1 - u}

= \sum_{u=0}^{n-1} u \frac{n!}{(n - 1 - u)!\, u!} p^{u+1} (1 - p)^{n - 1 - u} + \sum_{u=0}^{n-1} \frac{n!}{(n - 1 - u)!\, u!} p^{u+1} (1 - p)^{n - 1 - u}

= np \sum_{u=0}^{n-1} u \frac{(n - 1)!}{(n - 1 - u)!\, u!} p^u (1 - p)^{n - 1 - u} + np \sum_{u=0}^{n-1} \frac{(n - 1)!}{(n - 1 - u)!\, u!} p^u (1 - p)^{n - 1 - u}

= np\,E(U) + np(1) = np(n - 1)p + np = np(np - p + 1) = n^2 p^2 - np^2 + np

where U ~ Binomial(n - 1, p), so E(U) = (n - 1)p by the result just proved. Therefore

E(X^2) = n^2 p^2 - np^2 + np

V(X) = E(X^2) - E(X)^2 = n^2 p^2 - np^2 + np - (np)^2 = np - np^2 = np(1 - p) = npq
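Numeric confirmation of both identities directly from the p.m.f. (same arbitrary n and p as above):

```python
# E(X^2) = n^2 p^2 - n p^2 + np and V(X) = npq, checked term by term.
from math import comb

n, p = 12, 0.35  # arbitrary illustrative parameters
q = 1 - p
EX2 = sum(x**2 * comb(n, x) * p**x * q**(n - x) for x in range(n + 1))
print(EX2, n**2 * p**2 - n * p**2 + n * p)   # equal up to rounding
print(EX2 - (n * p)**2, n * p * q)           # V(X) = npq
```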


What is the moment generating function? Let us show that

m_X(t) = (1 - p + pe^t)^n

By definition,

m_X(t) = E(e^{tX}) = \sum_{x=0}^{n} e^{tx} f(x) = \sum_{x=0}^{n} e^{tx} \frac{n!}{(n - x)!\, x!} p^x (1 - p)^{n - x}

= \sum_{x=0}^{n} \frac{n!}{(n - x)!\, x!} (pe^t)^x (1 - p)^{n - x}

From the binomial theorem,

(a + b)^n = \sum_{i=0}^{n} \frac{n!}{(n - i)!\, i!} a^i b^{n - i}

Then

\sum_{x=0}^{n} \frac{n!}{(n - x)!\, x!} (pe^t)^x (1 - p)^{n - x} = \left((1 - p) + pe^t\right)^n = (1 - p + pe^t)^n

\therefore m_X(t) = (1 - p + pe^t)^n

Note that

\frac{dm_X}{dt}\Big|_{t=0} = n(1 - p + pe^t)^{n-1} pe^t \Big|_{t=0} = np = E(X) = \mu \quad \text{(the mean)}

\frac{d^2 m_X}{dt^2}\Big|_{t=0} = \left[ n(n - 1)(1 - p + pe^t)^{n-2} p^2 e^{2t} + n(1 - p + pe^t)^{n-1} pe^t \right]_{t=0} = n(n - 1)p^2 + np = n^2 p^2 - np^2 + np = E(X^2)
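A symbolic sympy check that these two derivatives at t = 0 reproduce np and n^2 p^2 - np^2 + np (illustrative only):

```python
# Differentiate the binomial m.g.f. symbolically and evaluate at t = 0.
import sympy as sp

t, p, n = sp.symbols('t p n')
m = (1 - p + p * sp.exp(t))**n

m1 = sp.diff(m, t).subs(t, 0)      # first moment
m2 = sp.diff(m, t, 2).subs(t, 0)   # second moment
print(sp.simplify(m1 - n * p))     # 0, i.e. E(X) = np
print(sp.expand(sp.simplify(m2)))  # n**2*p**2 - n*p**2 + n*p
```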
Characteristic function:

\phi_X(t) = m_X(it) = (1 - p + pe^{it})^n

Probability generating function:

G(t) = E(t^X) = \sum_{x=0}^{n} t^x \frac{n!}{(n - x)!\, x!} p^x (1 - p)^{n - x} = \sum_{x=0}^{n} \frac{n!}{(n - x)!\, x!} (pt)^x (1 - p)^{n - x} = (1 - p + pt)^n

where

P(X = k) = \frac{1}{k!} \frac{d^k G(t)}{dt^k}\Big|_{t=0}
Let us note that:

If X ~ Bernoulli(p):

E(X) = p, \quad V(X) = pq, \quad m_X(t) = 1 - p + pe^t, \quad \phi_X(t) = 1 - p + pe^{it}, \quad G(t) = 1 - p + pt

If Y ~ Binomial(n, p):

E(Y) = np, \quad V(Y) = npq, \quad m_Y(t) = (1 - p + pe^t)^n, \quad \phi_Y(t) = (1 - p + pe^{it})^n, \quad G(t) = (1 - p + pt)^n
Suppose we have independent X_1, X_2, X_3, \ldots, X_n ~ Bernoulli(p) and define Y = X_1 + X_2 + \cdots + X_n. Then

E(Y) = E\left(\sum_{i=1}^{n} X_i\right) = \sum_{i=1}^{n} E(X_i) = \sum_{i=1}^{n} p = np

Var(Y) = Var\left(\sum_{i=1}^{n} X_i\right) = \sum_{i=1}^{n} Var(X_i) = \sum_{i=1}^{n} pq = npq

where the variance of the sum splits into a sum of variances because the X_i are independent.
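A Monte Carlo sketch of these two identities (n = 20, p = 0.25 and the replication count are arbitrary choices):

```python
# Simulate sums of n i.i.d. Bernoulli(p) draws and compare moments.
import numpy as np

rng = np.random.default_rng(0)
n, p, reps = 20, 0.25, 100_000  # arbitrary illustrative values

Y = rng.binomial(1, p, size=(reps, n)).sum(axis=1)  # one sum per replication
print(Y.mean(), n * p)               # ~5.0
print(Y.var(), n * p * (1 - p))      # ~3.75
```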

Theorem. If X_1, X_2, \ldots, X_n are independent random variables, then

m_{X_1 + X_2 + \cdots + X_n}(t) = m_{\sum_{i=1}^{n} X_i}(t) = \prod_{i=1}^{n} m_{X_i}(t)

\phi_{X_1 + X_2 + \cdots + X_n}(t) = \phi_{\sum_{i=1}^{n} X_i}(t) = \prod_{i=1}^{n} \phi_{X_i}(t)

G_{X_1 + X_2 + \cdots + X_n}(t) = G_{\sum_{i=1}^{n} X_i}(t) = \prod_{i=1}^{n} G_{X_i}(t)

Theorem. Two random variables X, Y have the same probability distribution if and only if

m_X(t) = m_Y(t) \quad \text{or} \quad G_X(t) = G_Y(t) \quad \text{or} \quad \phi_X(t) = \phi_Y(t)


Returning to the sum of Bernoulli random variables Y = X_1 + X_2 + X_3 + \cdots + X_n,

m_Y(t) = \prod_{i=1}^{n} m_{X_i}(t) = \prod_{i=1}^{n} (1 - p + pe^t) = (1 - p + pe^t)^n

But the moment generating function of a binomial distribution is precisely (1 - p + pe^t)^n. Hence, by the uniqueness theorem above, we have shown that

Y = X_1 + X_2 + X_3 + \cdots + X_n ~ Binomial(n, p)
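An empirical illustration of this conclusion: the frequencies of the simulated sum of Bernoulli draws track binom.pmf (n = 5, p = 0.3 and the seed are arbitrary):

```python
# Compare empirical frequencies of Y = X_1 + ... + X_n with Binomial(n, p).
import numpy as np
from scipy.stats import binom

rng = np.random.default_rng(1)
n, p, reps = 5, 0.3, 200_000  # arbitrary illustrative values

Y = rng.binomial(1, p, size=(reps, n)).sum(axis=1)
for k in range(n + 1):
    print(k, round((Y == k).mean(), 4), round(binom.pmf(k, n, p), 4))
```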
By the law of total probability, and using that B is independent of the outcome of the first attempt,

P(B) = P(B \mid S \text{ on the first attempt})\,P(S \text{ on the first attempt}) + P(B \mid S^c \text{ on the first attempt})\,P(S^c \text{ on the first attempt})

= P(B)\,P(S \text{ on the first attempt}) + P(B)\,P(S^c \text{ on the first attempt})

= P(B)\left[P(S \text{ on the first attempt}) + P(S^c \text{ on the first attempt})\right] = P(B)

That is, P(B \mid \text{outcome of the first attempt}) = P(B).

Then P(B) = 0.4, and

P(B \mid \text{the first one is } S) = P(B) = 0.4

X is the number of components operating for more than 1000 hrs; here X ~ Binomial(4, 0.8).

a) P(X = 2) = \binom{4}{2}(0.8)^2(0.2)^2 = 0.1536

b) P(X \geq 2) = \sum_{x=2}^{4} \binom{4}{x}(0.8)^x(0.2)^{4-x} = 0.9728
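A scipy check of both values, assuming X ~ Binomial(4, 0.8) as the formulas indicate:

```python
# Reproduce the two answers of the worked example.
from scipy.stats import binom

print(binom.pmf(2, 4, 0.8))   # 0.1536
print(binom.sf(1, 4, 0.8))    # P(X >= 2) = 1 - P(X <= 1) = 0.9728
```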
