Documentos de Académico
Documentos de Profesional
Documentos de Cultura
Distribución Bernoulli
𝑆𝑒𝑎 𝑋: (Ω, 𝜎, 𝑃) → 𝐽 = {0,1}
𝑆𝑒 𝑑𝑖𝑐𝑒 𝑞𝑢𝑒 𝑋 𝑡𝑖𝑒𝑛𝑒 𝑑𝑖𝑠𝑡𝑟𝑖𝑏𝑢𝑐𝑖ó𝑛 𝑏𝑒𝑟𝑛𝑜𝑢𝑙𝑙𝑖 𝑠𝑖 𝑠𝑜𝑙𝑜 𝑡𝑖𝑒𝑛𝑒 𝑑𝑜𝑠 𝑝𝑜𝑠𝑖𝑏𝑙𝑒𝑠 𝑟𝑒𝑠𝑢𝑙𝑡𝑎𝑑𝑜𝑠
é𝑥𝑖𝑡𝑜 𝑦 𝑓𝑟𝑎𝑐𝑎𝑠𝑜
𝑑𝑜𝑛𝑑𝑒 𝑎𝑑𝑒𝑚á𝑠
𝑙𝑎 𝑓𝑢𝑛𝑐𝑖ó𝑛 𝑚𝑎𝑠𝑎 𝑑𝑒 𝑝𝑟𝑜𝑏𝑎𝑏𝑖𝑙𝑖𝑑𝑎𝑑 𝑓. 𝑚. 𝑝
f(x) = \begin{cases} p & \text{si } x = 1 \\ 1 - p & \text{si } x = 0 \end{cases}
𝐿𝑎 𝑝𝑟𝑜𝑏𝑎𝑏𝑖𝑙𝑖𝑑𝑎𝑑 𝑑𝑒 é𝑥𝑖𝑡𝑜 𝑒𝑠 𝑝 𝑦 𝑙𝑎 𝑝𝑟𝑜𝑏𝑎𝑏𝑖𝑙𝑖𝑑𝑎𝑑 𝑑𝑒 𝑓𝑟𝑎𝑐𝑎𝑠𝑜 𝑒𝑠 1 − 𝑝
𝐸𝑛 𝑒𝑠𝑡𝑒 𝑐𝑎𝑠𝑜 𝑑𝑒𝑐𝑖𝑚𝑜𝑠 𝑞𝑢𝑒 𝑋~𝐵𝑒𝑟𝑛𝑜𝑢𝑙𝑙𝑖(𝑝)
"𝑋 𝑠𝑒 𝑑𝑖𝑠𝑡𝑟𝑖𝑏𝑢𝑦𝑒 𝑑𝑒 𝑚𝑎𝑛𝑒𝑟𝑎 𝐵𝑒𝑟𝑛𝑜𝑢𝑙𝑙𝑖 𝑐𝑜𝑛 𝑝𝑎𝑟á𝑚𝑒𝑡𝑟𝑜 𝑝"
𝑁𝑜𝑡𝑎𝑠 𝑖𝑚𝑝𝑜𝑟𝑡𝑎𝑛𝑡𝑒𝑠:
𝐸(𝑋) = 𝑝
V(X) = p - p^2 = p(1 - p)
𝑃𝑜𝑟 𝑙𝑜 𝑡𝑎𝑛𝑡𝑜 𝑙𝑎 𝑣𝑎𝑟𝑖𝑎𝑛𝑧𝑎 𝑒𝑠:
𝑉(𝑋) = 𝑝𝑞 𝑐𝑜𝑛 𝑞 = 1 − 𝑝
𝑆𝑒 𝑑𝑒𝑓𝑖𝑛𝑒 𝑙𝑎 𝑓𝑢𝑛𝑐𝑖ó𝑛 𝑔𝑒𝑛𝑒𝑟𝑎𝑑𝑜𝑟𝑎 𝑑𝑒 𝑚𝑜𝑚𝑒𝑛𝑡𝑜𝑠
𝑃𝑜𝑟 𝑙𝑜 𝑡𝑎𝑛𝑡𝑜
m_X(t) = 1 - p + p e^t
\phi_X(t) = m_X(it) = 1 - p + p e^{it}
𝑁𝑜𝑡𝑎:
\phi_X^{(n)}(0) = i^n \, E(X^n)
Se define la función generadora de probabilidad
𝐺𝑋 (𝑡) = 𝐸(𝑡 𝑋 )
𝑃𝑎𝑟𝑎 𝑒𝑙 𝑐𝑎𝑠𝑜 𝑑𝑒 𝑙𝑎 𝑑𝑖𝑠𝑡𝑟𝑖𝑏𝑢𝑐𝑖ó𝑛 𝐵𝑒𝑟𝑛𝑜𝑢𝑙𝑙𝑖(𝑝)
P(X = k) = \frac{1}{k!} \frac{d^k G_X}{dt^k}(t = 0)
𝐷𝑖𝑠𝑡𝑟𝑖𝑏𝑢𝑐𝑖ó𝑛 𝐵𝑖𝑛𝑜𝑚𝑖𝑎𝑙
𝑆𝑢𝑝𝑜𝑛𝑔𝑎𝑚𝑜𝑠 𝑞𝑢𝑒 𝑠𝑒 𝑡𝑖𝑒𝑛𝑒 𝑢𝑛 𝑒𝑥𝑝𝑒𝑟𝑖𝑚𝑒𝑛𝑡𝑜 𝑒𝑛 𝑞𝑢𝑒 𝑐𝑜𝑛𝑠𝑖𝑠𝑡𝑒 𝑒𝑛 𝑟𝑒𝑝𝑒𝑡𝑖𝑟 𝑒𝑙 𝑚𝑖𝑠𝑚𝑜 𝑒𝑥𝑝𝑒𝑟𝑖𝑚𝑒𝑛𝑡𝑜 𝐵𝑒𝑟𝑛𝑜𝑢𝑙𝑙𝑖
𝑛 𝑣𝑒𝑐𝑒𝑠
𝑑𝑜𝑛𝑑𝑒 𝑒𝑛 𝑐𝑎𝑑𝑎 𝑟𝑒𝑝𝑒𝑡𝑖𝑐𝑖ó𝑛, 𝑙𝑎 𝑝𝑟𝑜𝑏𝑎𝑏𝑖𝑙𝑖𝑑𝑎𝑑 𝑑𝑒 é𝑥𝑖𝑡𝑜 𝑝 𝑛𝑜 𝑐𝑎𝑚𝑏𝑖𝑎 𝑦 𝑐𝑎𝑑𝑎 𝑟𝑒𝑝𝑒𝑡𝑖𝑐𝑖ó𝑛 𝑒𝑠 𝑖𝑛𝑑𝑒𝑝𝑒𝑛𝑑𝑖𝑒𝑛𝑡𝑒
𝑢𝑛𝑎 𝑑𝑒 𝑙𝑎 𝑜𝑡𝑟𝑎
𝐸𝑛 𝑒𝑠𝑡𝑒 𝑐𝑎𝑠𝑜
𝑋: (Ω, 𝜎, 𝑃) → 𝐽 = {0,1,2,3, … , 𝑛}
𝑑𝑜𝑛𝑑𝑒 𝐽 𝑒𝑠 𝑒𝑙 𝑐𝑜𝑛𝑗𝑢𝑛𝑡𝑜 𝑑𝑒𝑙 𝑛ú𝑚𝑒𝑟𝑜 𝑑𝑒 é𝑥𝑖𝑡𝑜𝑠 𝑞𝑢𝑒 𝑝𝑢𝑒𝑑𝑒 𝑜𝑐𝑢𝑟𝑟𝑖𝑟 𝑒𝑛 𝑒𝑙 𝑒𝑥𝑝𝑒𝑟𝑖𝑚𝑒𝑛𝑡𝑜
𝐸𝑠 𝑑𝑒𝑐𝑖𝑟 𝑐𝑢𝑎𝑛𝑑𝑜 𝑋 𝑑𝑒𝑛𝑜𝑡𝑎 𝑒𝑙 𝑛ú𝑚𝑒𝑟𝑜 𝑑𝑒 é𝑥𝑖𝑡𝑜𝑠 𝑜𝑏𝑡𝑒𝑛𝑖𝑑𝑜𝑠 𝑒𝑛 𝑢𝑛 𝑒𝑥𝑝𝑒𝑟𝑖𝑚𝑒𝑛𝑡𝑜 𝑞𝑢𝑒 𝑠𝑒 𝑟𝑒𝑝𝑖𝑡𝑒 𝑛 𝑣𝑒𝑐𝑒𝑠
entonces se dice que X sigue una distribución binomial
𝑋~𝐵𝑖𝑛𝑜𝑚𝑖𝑎𝑙(𝑛, 𝑝)
¿ 𝐶𝑢á𝑙 𝑒𝑠 𝑙𝑎 𝑓𝑢𝑛𝑐𝑖ó𝑛 𝑚𝑎𝑠𝑎 𝑑𝑒 𝑝𝑟𝑜𝑏𝑎𝑏𝑖𝑙𝑖𝑑𝑎𝑑?
𝑓(𝑥) = 𝑃(𝑋 = 𝑥)
𝐷𝑒 𝑙𝑜𝑠 𝑛 𝑖𝑛𝑡𝑒𝑛𝑡𝑜𝑠 𝑛𝑒𝑐𝑒𝑠𝑖𝑡𝑎𝑚𝑜𝑠 𝑠ó𝑙𝑜 𝑥 é𝑥𝑖𝑡𝑜𝑠
\binom{n}{x} p^x (1 - p)^{n-x}
𝐸𝑠 𝑑𝑒𝑐𝑖𝑟
𝑠𝑖 𝑋~𝐵𝑖𝑛𝑜𝑚𝑖𝑎𝑙(𝑛, 𝑝)
𝑒𝑛𝑡𝑜𝑛𝑐𝑒𝑠 𝑙𝑎 𝑓. 𝑚. 𝑝 𝑒𝑠
f(x) = \binom{n}{x} p^x (1 - p)^{n-x}
𝑐𝑜𝑛 𝑥 = 0,1,2, … , 𝑛
𝐷𝑒𝑚𝑜𝑠𝑡𝑟𝑒𝑚𝑜𝑠 𝑞𝑢𝑒
𝐸(𝑋) = 𝑛𝑝
𝑉(𝑋) = 𝑛𝑝𝑞
E(X) = \sum_{x=0}^{n} x f(x) = \sum_{x=0}^{n} x \binom{n}{x} p^x (1 - p)^{n-x}
= \sum_{x=0}^{n} x \frac{n!}{(n-x)!\,x!} p^x (1-p)^{n-x} = \sum_{x=1}^{n} \frac{n!}{(n-x)!\,(x-1)!} p^x (1-p)^{n-x}
𝑠𝑒𝑎 𝑢 = 𝑥 − 1
\sum_{x=1}^{n} \frac{n!}{(n-x)!\,(x-1)!} p^x (1-p)^{n-x} = \sum_{u=0}^{n-1} \frac{n!}{(n-u-1)!\,u!} p^{u+1} (1-p)^{n-u-1}
= np \sum_{u=0}^{n-1} \frac{(n-1)!}{(n-1-u)!\,u!} p^u (1-p)^{n-1-u} = np \cdot 1 = np
𝐴𝑙 ℎ𝑎𝑐𝑒𝑟 𝑒𝑙 𝑐𝑎𝑚𝑏𝑖𝑜 𝑢 = 𝑥 − 1
𝑐𝑜𝑛 𝑋~𝐵𝑖𝑛𝑜𝑚𝑖𝑎𝑙(𝑛, 𝑝)
𝑒𝑛𝑡𝑜𝑛𝑐𝑒𝑠 𝑈~𝐵𝑖𝑛𝑜𝑚𝑖𝑎𝑙(𝑛 − 1, 𝑝)
\sum_{u=0}^{n-1} \frac{(n-1)!}{(n-1-u)!\,u!} p^u (1-p)^{n-1-u} = \sum_{u=0}^{n-1} \binom{n-1}{u} p^u (1-p)^{n-1-u} = 1
𝑠𝑒𝑎 𝑢 = 𝑥 − 1
\sum_{x=1}^{n} x \frac{n!}{(n-x)!\,(x-1)!} p^x (1-p)^{n-x} = \sum_{u=0}^{n-1} (u+1) \frac{n!}{(n-1-u)!\,u!} p^{u+1} (1-p)^{n-1-u}
\sum_{u=0}^{n-1} (u+1) \frac{n!}{(n-1-u)!\,u!} p^{u+1} (1-p)^{n-1-u}
= \sum_{u=0}^{n-1} u \frac{n!}{(n-1-u)!\,u!} p^{u+1} (1-p)^{n-1-u} + \sum_{u=0}^{n-1} \frac{n!}{(n-1-u)!\,u!} p^{u+1} (1-p)^{n-1-u}
= np \sum_{u=0}^{n-1} u \frac{(n-1)!}{(n-1-u)!\,u!} p^u (1-p)^{n-1-u} + np \sum_{u=0}^{n-1} \frac{(n-1)!}{(n-1-u)!\,u!} p^u (1-p)^{n-1-u}
𝐸𝑛𝑡𝑜𝑛𝑐𝑒𝑠
\sum_{x=0}^{n} \frac{n!}{(n-x)!\,x!} (p e^t)^x (1-p)^{n-x} = \left((1-p) + p e^t\right)^n = (1 - p + p e^t)^n
\therefore\; m_X(t) = (1 - p + p e^t)^n
𝑁𝑜𝑡𝑒 𝑞𝑢𝑒
\left. \frac{d m_X}{dt} \right|_{t=0} = \left. n(1 - p + p e^t)^{n-1} p e^t \right|_{t=0} = np = E(X) = \mu \to \text{Media o promedio}
\left. \frac{d^2 m_X}{dt^2} \right|_{t=0} = \left. n(n-1)(1 - p + p e^t)^{n-2} p^2 e^{2t} + n(1 - p + p e^t)^{n-1} p e^t \right|_{t=0} = n(n-1)p^2 + np
= n^2 p^2 - n p^2 + np = E(X^2)
𝐹𝑢𝑛𝑐𝑖ó𝑛 𝑐𝑎𝑟𝑎𝑐𝑡𝑒𝑟í𝑠𝑡𝑖𝑐𝑎
\phi_X(t) = m_X(it) = (1 - p + p e^{it})^n
donde P(X = k) = \frac{1}{k!} \left. \frac{d^k G(t)}{dt^k} \right|_{t=0}
𝑁𝑜𝑡𝑒𝑚𝑜𝑠 𝑞𝑢𝑒
𝑆𝑖 𝑋~𝐵𝑒𝑟𝑛𝑜𝑢𝑙𝑙𝑖(𝑝)
E(Y) = E\left(\sum_{i=1}^{n} X_i\right) = \sum_{i=1}^{n} E(X_i) = \sum_{i=1}^{n} p = np
𝑇𝑒𝑜𝑟𝑒𝑚𝑎
𝑆𝑖 𝑋1 , 𝑋2 , … , 𝑋𝑛 𝑠𝑜𝑛 𝑣𝑎𝑟𝑖𝑎𝑏𝑙𝑒𝑠 𝑎𝑙𝑒𝑎𝑡𝑜𝑟𝑖𝑎𝑠 𝑖𝑛𝑑𝑒𝑝𝑒𝑛𝑑𝑖𝑒𝑛𝑡𝑒𝑠
𝑛
𝑇𝑒𝑜𝑟𝑒𝑚𝑎
X, Y variables aleatorias tienen la misma distribución de probabilidad si y solo si
m_X(t) = m_Y(t)