add read me
This commit is contained in:
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,159 @@
|
||||
from sympy.concrete.summations import Sum
|
||||
from sympy.core.numbers import (oo, pi)
|
||||
from sympy.core.relational import Eq
|
||||
from sympy.core.singleton import S
|
||||
from sympy.core.symbol import symbols
|
||||
from sympy.functions.combinatorial.factorials import factorial
|
||||
from sympy.functions.elementary.exponential import exp
|
||||
from sympy.functions.elementary.miscellaneous import sqrt
|
||||
from sympy.functions.elementary.piecewise import Piecewise
|
||||
from sympy.functions.special.beta_functions import beta
|
||||
from sympy.functions.special.error_functions import erf
|
||||
from sympy.functions.special.gamma_functions import gamma
|
||||
from sympy.integrals.integrals import Integral
|
||||
from sympy.sets.sets import Interval
|
||||
from sympy.stats import (Normal, P, E, density, Gamma, Poisson, Rayleigh,
|
||||
variance, Bernoulli, Beta, Uniform, cdf)
|
||||
from sympy.stats.compound_rv import CompoundDistribution, CompoundPSpace
|
||||
from sympy.stats.crv_types import NormalDistribution
|
||||
from sympy.stats.drv_types import PoissonDistribution
|
||||
from sympy.stats.frv_types import BernoulliDistribution
|
||||
from sympy.testing.pytest import raises, ignore_warnings
|
||||
from sympy.stats.joint_rv_types import MultivariateNormalDistribution
|
||||
|
||||
from sympy.abc import x
|
||||
|
||||
|
||||
# helpers for testing troublesome unevaluated expressions
|
||||
flat = lambda s: ''.join(str(s).split())
|
||||
streq = lambda *a: len(set(map(flat, a))) == 1
|
||||
assert streq(x, x)
|
||||
assert streq(x, 'x')
|
||||
assert not streq(x, x + 1)
|
||||
|
||||
|
||||
def test_normal_CompoundDist():
|
||||
X = Normal('X', 1, 2)
|
||||
Y = Normal('X', X, 4)
|
||||
assert density(Y)(x).simplify() == sqrt(10)*exp(-x**2/40 + x/20 - S(1)/40)/(20*sqrt(pi))
|
||||
assert E(Y) == 1 # it is always equal to mean of X
|
||||
assert P(Y > 1) == S(1)/2 # as 1 is the mean
|
||||
assert P(Y > 5).simplify() == S(1)/2 - erf(sqrt(10)/5)/2
|
||||
assert variance(Y) == variance(X) + 4**2 # 2**2 + 4**2
|
||||
# https://math.stackexchange.com/questions/1484451/
|
||||
# (Contains proof of E and variance computation)
|
||||
|
||||
|
||||
def test_poisson_CompoundDist():
|
||||
k, t, y = symbols('k t y', positive=True, real=True)
|
||||
G = Gamma('G', k, t)
|
||||
D = Poisson('P', G)
|
||||
assert density(D)(y).simplify() == t**y*(t + 1)**(-k - y)*gamma(k + y)/(gamma(k)*gamma(y + 1))
|
||||
# https://en.wikipedia.org/wiki/Negative_binomial_distribution#Gamma%E2%80%93Poisson_mixture
|
||||
assert E(D).simplify() == k*t # mean of NegativeBinomialDistribution
|
||||
|
||||
|
||||
def test_bernoulli_CompoundDist():
|
||||
X = Beta('X', 1, 2)
|
||||
Y = Bernoulli('Y', X)
|
||||
assert density(Y).dict == {0: S(2)/3, 1: S(1)/3}
|
||||
assert E(Y) == P(Eq(Y, 1)) == S(1)/3
|
||||
assert variance(Y) == S(2)/9
|
||||
assert cdf(Y) == {0: S(2)/3, 1: 1}
|
||||
|
||||
# test issue 8128
|
||||
a = Bernoulli('a', S(1)/2)
|
||||
b = Bernoulli('b', a)
|
||||
assert density(b).dict == {0: S(1)/2, 1: S(1)/2}
|
||||
assert P(b > 0.5) == S(1)/2
|
||||
|
||||
X = Uniform('X', 0, 1)
|
||||
Y = Bernoulli('Y', X)
|
||||
assert E(Y) == S(1)/2
|
||||
assert P(Eq(Y, 1)) == E(Y)
|
||||
|
||||
|
||||
def test_unevaluated_CompoundDist():
|
||||
# these tests need to be removed once they work with evaluation as they are currently not
|
||||
# evaluated completely in sympy.
|
||||
R = Rayleigh('R', 4)
|
||||
X = Normal('X', 3, R)
|
||||
ans = '''
|
||||
Piecewise(((-sqrt(pi)*sinh(x/4 - 3/4) + sqrt(pi)*cosh(x/4 - 3/4))/(
|
||||
8*sqrt(pi)), Abs(arg(x - 3)) <= pi/4), (Integral(sqrt(2)*exp(-(x - 3)
|
||||
**2/(2*R**2))*exp(-R**2/32)/(32*sqrt(pi)), (R, 0, oo)), True))'''
|
||||
assert streq(density(X)(x), ans)
|
||||
|
||||
expre = '''
|
||||
Integral(X*Integral(sqrt(2)*exp(-(X-3)**2/(2*R**2))*exp(-R**2/32)/(32*
|
||||
sqrt(pi)),(R,0,oo)),(X,-oo,oo))'''
|
||||
with ignore_warnings(UserWarning): ### TODO: Restore tests once warnings are removed
|
||||
assert streq(E(X, evaluate=False).rewrite(Integral), expre)
|
||||
|
||||
X = Poisson('X', 1)
|
||||
Y = Poisson('Y', X)
|
||||
Z = Poisson('Z', Y)
|
||||
exprd = Sum(exp(-Y)*Y**x*Sum(exp(-1)*exp(-X)*X**Y/(factorial(X)*factorial(Y)
|
||||
), (X, 0, oo))/factorial(x), (Y, 0, oo))
|
||||
assert density(Z)(x) == exprd
|
||||
|
||||
N = Normal('N', 1, 2)
|
||||
M = Normal('M', 3, 4)
|
||||
D = Normal('D', M, N)
|
||||
exprd = '''
|
||||
Integral(sqrt(2)*exp(-(N-1)**2/8)*Integral(exp(-(x-M)**2/(2*N**2))*exp
|
||||
(-(M-3)**2/32)/(8*pi*N),(M,-oo,oo))/(4*sqrt(pi)),(N,-oo,oo))'''
|
||||
assert streq(density(D, evaluate=False)(x), exprd)
|
||||
|
||||
|
||||
def test_Compound_Distribution():
|
||||
X = Normal('X', 2, 4)
|
||||
N = NormalDistribution(X, 4)
|
||||
C = CompoundDistribution(N)
|
||||
assert C.is_Continuous
|
||||
assert C.set == Interval(-oo, oo)
|
||||
assert C.pdf(x, evaluate=True).simplify() == exp(-x**2/64 + x/16 - S(1)/16)/(8*sqrt(pi))
|
||||
|
||||
assert not isinstance(CompoundDistribution(NormalDistribution(2, 3)),
|
||||
CompoundDistribution)
|
||||
M = MultivariateNormalDistribution([1, 2], [[2, 1], [1, 2]])
|
||||
raises(NotImplementedError, lambda: CompoundDistribution(M))
|
||||
|
||||
X = Beta('X', 2, 4)
|
||||
B = BernoulliDistribution(X, 1, 0)
|
||||
C = CompoundDistribution(B)
|
||||
assert C.is_Finite
|
||||
assert C.set == {0, 1}
|
||||
y = symbols('y', negative=False, integer=True)
|
||||
assert C.pdf(y, evaluate=True) == Piecewise((S(1)/(30*beta(2, 4)), Eq(y, 0)),
|
||||
(S(1)/(60*beta(2, 4)), Eq(y, 1)), (0, True))
|
||||
|
||||
k, t, z = symbols('k t z', positive=True, real=True)
|
||||
G = Gamma('G', k, t)
|
||||
X = PoissonDistribution(G)
|
||||
C = CompoundDistribution(X)
|
||||
assert C.is_Discrete
|
||||
assert C.set == S.Naturals0
|
||||
assert C.pdf(z, evaluate=True).simplify() == t**z*(t + 1)**(-k - z)*gamma(k \
|
||||
+ z)/(gamma(k)*gamma(z + 1))
|
||||
|
||||
|
||||
def test_compound_pspace():
|
||||
X = Normal('X', 2, 4)
|
||||
Y = Normal('Y', 3, 6)
|
||||
assert not isinstance(Y.pspace, CompoundPSpace)
|
||||
N = NormalDistribution(1, 2)
|
||||
D = PoissonDistribution(3)
|
||||
B = BernoulliDistribution(0.2, 1, 0)
|
||||
pspace1 = CompoundPSpace('N', N)
|
||||
pspace2 = CompoundPSpace('D', D)
|
||||
pspace3 = CompoundPSpace('B', B)
|
||||
assert not isinstance(pspace1, CompoundPSpace)
|
||||
assert not isinstance(pspace2, CompoundPSpace)
|
||||
assert not isinstance(pspace3, CompoundPSpace)
|
||||
M = MultivariateNormalDistribution([1, 2], [[2, 1], [1, 2]])
|
||||
raises(ValueError, lambda: CompoundPSpace('M', M))
|
||||
Y = Normal('Y', X, 6)
|
||||
assert isinstance(Y.pspace, CompoundPSpace)
|
||||
assert Y.pspace.distribution == CompoundDistribution(NormalDistribution(X, 6))
|
||||
assert Y.pspace.domain.set == Interval(-oo, oo)
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,312 @@
|
||||
from sympy.concrete.summations import Sum
|
||||
from sympy.core.numbers import (I, Rational, oo, pi)
|
||||
from sympy.core.singleton import S
|
||||
from sympy.core.symbol import Symbol
|
||||
from sympy.functions.elementary.complexes import (im, re)
|
||||
from sympy.functions.elementary.exponential import log
|
||||
from sympy.functions.elementary.integers import floor
|
||||
from sympy.functions.elementary.miscellaneous import sqrt
|
||||
from sympy.functions.elementary.piecewise import Piecewise
|
||||
from sympy.functions.special.bessel import besseli
|
||||
from sympy.functions.special.beta_functions import beta
|
||||
from sympy.functions.special.zeta_functions import zeta
|
||||
from sympy.sets.sets import FiniteSet
|
||||
from sympy.simplify.simplify import simplify
|
||||
from sympy.utilities.lambdify import lambdify
|
||||
from sympy.core.relational import Eq, Ne
|
||||
from sympy.functions.elementary.exponential import exp
|
||||
from sympy.logic.boolalg import Or
|
||||
from sympy.sets.fancysets import Range
|
||||
from sympy.stats import (P, E, variance, density, characteristic_function,
|
||||
where, moment_generating_function, skewness, cdf,
|
||||
kurtosis, coskewness)
|
||||
from sympy.stats.drv_types import (PoissonDistribution, GeometricDistribution,
|
||||
FlorySchulz, Poisson, Geometric, Hermite, Logarithmic,
|
||||
NegativeBinomial, Skellam, YuleSimon, Zeta,
|
||||
DiscreteRV)
|
||||
from sympy.testing.pytest import slow, nocache_fail, raises, skip
|
||||
from sympy.stats.symbolic_probability import Expectation
|
||||
from sympy.functions.combinatorial.factorials import FallingFactorial
|
||||
|
||||
x = Symbol('x')
|
||||
|
||||
|
||||
def test_PoissonDistribution():
    """Exercise the raw PoissonDistribution object: cdf tail and moments."""
    rate = 3
    dist = PoissonDistribution(rate)
    # far above the mean the cdf is essentially 1 (non-integer args accepted)
    assert abs(dist.cdf(10).evalf() - 1) < .001
    assert abs(dist.cdf(10.4).evalf() - 1) < .001
    # for a Poisson, mean and variance both equal the rate
    assert dist.expectation(x, x) == rate
    assert dist.expectation(x**2, x) - dist.expectation(x, x)**2 == rate
|
||||
|
||||
|
||||
def test_Poisson():
|
||||
l = 3
|
||||
x = Poisson('x', l)
|
||||
assert E(x) == l
|
||||
assert E(2*x) == 2*l
|
||||
assert variance(x) == l
|
||||
assert density(x) == PoissonDistribution(l)
|
||||
assert isinstance(E(x, evaluate=False), Expectation)
|
||||
assert isinstance(E(2*x, evaluate=False), Expectation)
|
||||
# issue 8248
|
||||
assert x.pspace.compute_expectation(1) == 1
|
||||
# issue 27344
|
||||
try:
|
||||
import numpy as np
|
||||
except ImportError:
|
||||
skip("numpy not installed")
|
||||
y = Poisson('y', np.float64(4.72544290380919e-11))
|
||||
assert E(y) == 4.72544290380919e-11
|
||||
y = Poisson('y', np.float64(4.725442903809197e-11))
|
||||
assert E(y) == 4.725442903809197e-11
|
||||
l2 = 5
|
||||
z = Poisson('z', l2)
|
||||
assert E(z) == l2
|
||||
assert E(FallingFactorial(z, 3)) == l2**3
|
||||
assert E(z**2) == l2 + l2**2
|
||||
|
||||
|
||||
def test_FlorySchulz():
    """Flory-Schulz distribution: symbolic mean, variance and density."""
    a = Symbol("a")
    z = Symbol("z")
    W = FlorySchulz('x', a)
    assert E(W) == (2 - a)/a
    # variance matches the closed form 2(1 - a)/a**2
    assert (variance(W) - 2*(1 - a)/a**2).simplify() == S(0)
    assert density(W)(z) == a**2*z*(1 - a)**(z - 1)
|
||||
|
||||
|
||||
@slow
|
||||
def test_GeometricDistribution():
|
||||
p = S.One / 5
|
||||
d = GeometricDistribution(p)
|
||||
assert d.expectation(x, x) == 1/p
|
||||
assert d.expectation(x**2, x) - d.expectation(x, x)**2 == (1-p)/p**2
|
||||
assert abs(d.cdf(20000).evalf() - 1) < .001
|
||||
assert abs(d.cdf(20000.8).evalf() - 1) < .001
|
||||
G = Geometric('G', p=S(1)/4)
|
||||
assert cdf(G)(S(7)/2) == P(G <= S(7)/2)
|
||||
|
||||
X = Geometric('X', Rational(1, 5))
|
||||
Y = Geometric('Y', Rational(3, 10))
|
||||
assert coskewness(X, X + Y, X + 2*Y).simplify() == sqrt(230)*Rational(81, 1150)
|
||||
|
||||
|
||||
def test_Hermite():
|
||||
a1 = Symbol("a1", positive=True)
|
||||
a2 = Symbol("a2", negative=True)
|
||||
raises(ValueError, lambda: Hermite("H", a1, a2))
|
||||
|
||||
a1 = Symbol("a1", negative=True)
|
||||
a2 = Symbol("a2", positive=True)
|
||||
raises(ValueError, lambda: Hermite("H", a1, a2))
|
||||
|
||||
a1 = Symbol("a1", positive=True)
|
||||
x = Symbol("x")
|
||||
H = Hermite("H", a1, a2)
|
||||
assert moment_generating_function(H)(x) == exp(a1*(exp(x) - 1)
|
||||
+ a2*(exp(2*x) - 1))
|
||||
assert characteristic_function(H)(x) == exp(a1*(exp(I*x) - 1)
|
||||
+ a2*(exp(2*I*x) - 1))
|
||||
assert E(H) == a1 + 2*a2
|
||||
|
||||
H = Hermite("H", a1=5, a2=4)
|
||||
assert density(H)(2) == 33*exp(-9)/2
|
||||
assert E(H) == 13
|
||||
assert variance(H) == 21
|
||||
assert kurtosis(H) == Rational(464,147)
|
||||
assert skewness(H) == 37*sqrt(21)/441
|
||||
|
||||
def test_Logarithmic():
    """Logarithmic distribution with p = 1/2: moments and lazy expectation."""
    p = S.Half
    L = Logarithmic('x', p)
    assert E(L) == -p/((1 - p)*log(1 - p))
    assert variance(L) == -1/log(2)**2 + 2/log(2)
    # linearity of expectation over a polynomial of the RV
    assert E(2*L**2 + 3*L + 4) == 4 + 7/log(2)
    # evaluate=False must return an unevaluated Expectation node
    assert isinstance(E(L, evaluate=False), Expectation)
|
||||
|
||||
|
||||
@nocache_fail
|
||||
def test_negative_binomial():
|
||||
r = 5
|
||||
p = S.One / 3
|
||||
x = NegativeBinomial('x', r, p)
|
||||
assert E(x) == r * (1 - p) / p
|
||||
# This hangs when run with the cache disabled:
|
||||
assert variance(x) == r * (1 - p) / p**2
|
||||
assert E(x**5 + 2*x + 3) == E(x**5) + 2*E(x) + 3 == Rational(796473, 1)
|
||||
assert isinstance(E(x, evaluate=False), Expectation)
|
||||
|
||||
|
||||
def test_skellam():
    """Skellam distribution: density, moments and generating functions."""
    mu1, mu2 = symbols('mu1 mu2')
    z = Symbol('z')
    X = Skellam('x', mu1, mu2)

    # pmf of the difference of two independent Poisson variables
    assert density(X)(z) == (mu1/mu2)**(z/2) * \
        exp(-mu1 - mu2)*besseli(z, 2*sqrt(mu1*mu2))
    assert skewness(X).expand() == mu1/(mu1*sqrt(mu1 + mu2) + mu2 *
        sqrt(mu1 + mu2)) - mu2/(mu1*sqrt(mu1 + mu2) + mu2*sqrt(mu1 + mu2))
    # variance adds, mean subtracts
    assert variance(X).expand() == mu1 + mu2
    assert E(X) == mu1 - mu2
    assert characteristic_function(X)(z) == exp(
        mu1*exp(I*z) - mu1 - mu2 + mu2*exp(-I*z))
    assert moment_generating_function(X)(z) == exp(
        mu1*exp(z) - mu1 - mu2 + mu2*exp(-z))
|
||||
|
||||
|
||||
def test_yule_simon():
    """Yule-Simon distribution with rho = 3: moments and cdf.

    Note: ``S`` is imported at module level, so no function-local
    import is needed here.
    """
    rho = S(3)
    X = YuleSimon('x', rho)
    assert simplify(E(X)) == rho/(rho - 1)
    assert simplify(variance(X)) == rho**2/((rho - 1)**2*(rho - 2))
    assert isinstance(E(X, evaluate=False), Expectation)
    # cdf evaluated symbolically at the random variable itself
    assert cdf(X)(X) == Piecewise((-beta(floor(X), 4)*floor(X) + 1, X >= 1), (0, True))
|
||||
|
||||
|
||||
def test_zeta():
    """Zeta (Zipf) distribution with s = 5: mean and variance."""
    s = S(5)
    X = Zeta('x', s)
    assert E(X) == zeta(s - 1)/zeta(s)
    assert simplify(variance(X)) == (
        zeta(s)*zeta(s - 2) - zeta(s - 1)**2)/zeta(s)**2
|
||||
|
||||
|
||||
def test_discrete_probability():
|
||||
X = Geometric('X', Rational(1, 5))
|
||||
Y = Poisson('Y', 4)
|
||||
G = Geometric('e', x)
|
||||
assert P(Eq(X, 3)) == Rational(16, 125)
|
||||
assert P(X < 3) == Rational(9, 25)
|
||||
assert P(X > 3) == Rational(64, 125)
|
||||
assert P(X >= 3) == Rational(16, 25)
|
||||
assert P(X <= 3) == Rational(61, 125)
|
||||
assert P(Ne(X, 3)) == Rational(109, 125)
|
||||
assert P(Eq(Y, 3)) == 32*exp(-4)/3
|
||||
assert P(Y < 3) == 13*exp(-4)
|
||||
assert P(Y > 3).equals(32*(Rational(-71, 32) + 3*exp(4)/32)*exp(-4)/3)
|
||||
assert P(Y >= 3).equals(32*(Rational(-39, 32) + 3*exp(4)/32)*exp(-4)/3)
|
||||
assert P(Y <= 3) == 71*exp(-4)/3
|
||||
assert P(Ne(Y, 3)).equals(
|
||||
13*exp(-4) + 32*(Rational(-71, 32) + 3*exp(4)/32)*exp(-4)/3)
|
||||
assert P(X < S.Infinity) is S.One
|
||||
assert P(X > S.Infinity) is S.Zero
|
||||
assert P(G < 3) == x*(2-x)
|
||||
assert P(Eq(G, 3)) == x*(-x + 1)**2
|
||||
|
||||
|
||||
def test_DiscreteRV():
|
||||
p = S(1)/2
|
||||
x = Symbol('x', integer=True, positive=True)
|
||||
pdf = p*(1 - p)**(x - 1) # pdf of Geometric Distribution
|
||||
D = DiscreteRV(x, pdf, set=S.Naturals, check=True)
|
||||
assert E(D) == E(Geometric('G', S(1)/2)) == 2
|
||||
assert P(D > 3) == S(1)/8
|
||||
assert D.pspace.domain.set == S.Naturals
|
||||
raises(ValueError, lambda: DiscreteRV(x, x, FiniteSet(*range(4)), check=True))
|
||||
|
||||
# purposeful invalid pmf but it should not raise since check=False
|
||||
# see test_drv_types.test_ContinuousRV for explanation
|
||||
X = DiscreteRV(x, 1/x, S.Naturals)
|
||||
assert P(X < 2) == 1
|
||||
assert E(X) == oo
|
||||
|
||||
def test_precomputed_characteristic_functions():
|
||||
import mpmath
|
||||
|
||||
def test_cf(dist, support_lower_limit, support_upper_limit):
|
||||
pdf = density(dist)
|
||||
t = S('t')
|
||||
x = S('x')
|
||||
|
||||
# first function is the hardcoded CF of the distribution
|
||||
cf1 = lambdify([t], characteristic_function(dist)(t), 'mpmath')
|
||||
|
||||
# second function is the Fourier transform of the density function
|
||||
f = lambdify([x, t], pdf(x)*exp(I*x*t), 'mpmath')
|
||||
cf2 = lambda t: mpmath.nsum(lambda x: f(x, t), [
|
||||
support_lower_limit, support_upper_limit], maxdegree=10)
|
||||
|
||||
# compare the two functions at various points
|
||||
for test_point in [2, 5, 8, 11]:
|
||||
n1 = cf1(test_point)
|
||||
n2 = cf2(test_point)
|
||||
|
||||
assert abs(re(n1) - re(n2)) < 1e-12
|
||||
assert abs(im(n1) - im(n2)) < 1e-12
|
||||
|
||||
test_cf(Geometric('g', Rational(1, 3)), 1, mpmath.inf)
|
||||
test_cf(Logarithmic('l', Rational(1, 5)), 1, mpmath.inf)
|
||||
test_cf(NegativeBinomial('n', 5, Rational(1, 7)), 0, mpmath.inf)
|
||||
test_cf(Poisson('p', 5), 0, mpmath.inf)
|
||||
test_cf(YuleSimon('y', 5), 1, mpmath.inf)
|
||||
test_cf(Zeta('z', 5), 1, mpmath.inf)
|
||||
|
||||
|
||||
def test_moment_generating_functions():
|
||||
t = S('t')
|
||||
|
||||
geometric_mgf = moment_generating_function(Geometric('g', S.Half))(t)
|
||||
assert geometric_mgf.diff(t).subs(t, 0) == 2
|
||||
|
||||
logarithmic_mgf = moment_generating_function(Logarithmic('l', S.Half))(t)
|
||||
assert logarithmic_mgf.diff(t).subs(t, 0) == 1/log(2)
|
||||
|
||||
negative_binomial_mgf = moment_generating_function(
|
||||
NegativeBinomial('n', 5, Rational(1, 3)))(t)
|
||||
assert negative_binomial_mgf.diff(t).subs(t, 0) == Rational(10, 1)
|
||||
|
||||
poisson_mgf = moment_generating_function(Poisson('p', 5))(t)
|
||||
assert poisson_mgf.diff(t).subs(t, 0) == 5
|
||||
|
||||
skellam_mgf = moment_generating_function(Skellam('s', 1, 1))(t)
|
||||
assert skellam_mgf.diff(t).subs(
|
||||
t, 2) == (-exp(-2) + exp(2))*exp(-2 + exp(-2) + exp(2))
|
||||
|
||||
yule_simon_mgf = moment_generating_function(YuleSimon('y', 3))(t)
|
||||
assert simplify(yule_simon_mgf.diff(t).subs(t, 0)) == Rational(3, 2)
|
||||
|
||||
zeta_mgf = moment_generating_function(Zeta('z', 5))(t)
|
||||
assert zeta_mgf.diff(t).subs(t, 0) == pi**4/(90*zeta(5))
|
||||
|
||||
|
||||
def test_Or():
    """Probability of disjunctions of events on a geometric RV."""
    X = Geometric('X', S.Half)
    assert P(Or(X < 3, X > 4)) == Rational(13, 16)
    # X > 2 implies X > 1, so the union reduces to the weaker event
    assert P(Or(X > 2, X > 1)) == P(X > 1)
    # complementary events partition the sample space
    assert P(Or(X >= 3, X < 3)) == 1
|
||||
|
||||
|
||||
def test_where():
    """where() returns the satisfying subset of the support as a Range."""
    X = Geometric('X', Rational(1, 5))
    Y = Poisson('Y', 4)
    # geometric support starts at 1; Poisson support starts at 0
    assert where(X**2 > 4).set == Range(3, S.Infinity, 1)
    assert where(X**2 >= 4).set == Range(2, S.Infinity, 1)
    assert where(Y**2 < 9).set == Range(0, 3, 1)
    assert where(Y**2 <= 9).set == Range(0, 4, 1)
|
||||
|
||||
|
||||
def test_conditional():
    """Conditional probabilities for discrete random variables."""
    X = Geometric('X', Rational(2, 3))
    Y = Poisson('Y', 3)
    # conditioning on a stricter event makes the weaker one certain
    assert P(X > 2, X > 3) == 1
    assert P(X > 3, X > 2) == Rational(1, 3)
    # contradictory condition: probability is zero
    assert P(Y > 2, Y < 2) == 0
    # conditioning on the full support leaves the probability unchanged
    assert P(Eq(Y, 3), Y >= 0) == 9*exp(-3)/2
    assert P(Eq(Y, 3), Eq(Y, 2)) == 0
    assert P(X < 2, Eq(X, 2)) == 0
    assert P(X > 2, Eq(X, 3)) == 1
|
||||
|
||||
|
||||
def test_product_spaces():
|
||||
X1 = Geometric('X1', S.Half)
|
||||
X2 = Geometric('X2', Rational(1, 3))
|
||||
assert str(P(X1 + X2 < 3).rewrite(Sum)) == (
|
||||
"Sum(Piecewise((1/(4*2**n), n >= -1), (0, True)), (n, -oo, -1))/3")
|
||||
assert str(P(X1 + X2 > 3).rewrite(Sum)) == (
|
||||
'Sum(Piecewise((2**(X2 - n - 2)*(2/3)**(X2 - 1)/6, '
|
||||
'X2 - n <= 2), (0, True)), (X2, 1, oo), (n, 1, oo))')
|
||||
assert P(Eq(X1 + X2, 3)) == Rational(1, 12)
|
||||
@@ -0,0 +1,60 @@
|
||||
from sympy.core.function import Function
|
||||
from sympy.core.symbol import symbols
|
||||
from sympy.functions.elementary.exponential import exp
|
||||
from sympy.stats.error_prop import variance_prop
|
||||
from sympy.stats.symbolic_probability import (RandomSymbol, Variance,
|
||||
Covariance)
|
||||
|
||||
|
||||
def test_variance_prop():
    """variance_prop: first-order error propagation without covariances."""
    x, y, z = symbols('x y z')
    # phi and t are treated as exact (zero-variance) constants
    phi, t = consts = symbols('phi t')
    a = RandomSymbol(x)
    var_x = Variance(a)
    var_y = Variance(RandomSymbol(y))
    var_z = Variance(RandomSymbol(z))
    f = Function('f')(x)
    # map: input expression -> expected propagated variance
    expected = {
        x + y: var_x + var_y,
        a + y: var_x + var_y,
        x + y + z: var_x + var_y + var_z,
        2*x: 4*var_x,
        x*y: var_x*y**2 + var_y*x**2,
        1/x: var_x/x**4,
        x/y: (var_x*y**2 + var_y*x**2)/y**4,
        exp(x): var_x*exp(2*x),
        exp(2*x): 4*var_x*exp(4*x),
        exp(-x*t): t**2*var_x*exp(-2*t*x),
        # an opaque function stays wrapped in an unevaluated Variance
        f: Variance(f),
    }
    for expr, want in expected.items():
        assert variance_prop(expr, consts=consts) == want
|
||||
|
||||
def test_variance_prop_with_covar():
|
||||
x, y, z = symbols('x y z')
|
||||
phi, t = consts = symbols('phi t')
|
||||
a = RandomSymbol(x)
|
||||
var_x = Variance(a)
|
||||
b = RandomSymbol(y)
|
||||
var_y = Variance(b)
|
||||
c = RandomSymbol(z)
|
||||
var_z = Variance(c)
|
||||
covar_x_y = Covariance(a, b)
|
||||
covar_x_z = Covariance(a, c)
|
||||
covar_y_z = Covariance(b, c)
|
||||
cases = {
|
||||
x + y: var_x + var_y + 2*covar_x_y,
|
||||
a + y: var_x + var_y + 2*covar_x_y,
|
||||
x + y + z: var_x + var_y + var_z + \
|
||||
2*covar_x_y + 2*covar_x_z + 2*covar_y_z,
|
||||
2*x: 4*var_x,
|
||||
x*y: var_x*y**2 + var_y*x**2 + 2*covar_x_y/(x*y),
|
||||
1/x: var_x/x**4,
|
||||
exp(x): var_x*exp(2*x),
|
||||
exp(2*x): 4*var_x*exp(4*x),
|
||||
exp(-x*t): t**2*var_x*exp(-2*t*x),
|
||||
}
|
||||
for inp, out in cases.items():
|
||||
obs = variance_prop(inp, consts=consts, include_covar=True)
|
||||
assert out == obs
|
||||
@@ -0,0 +1,509 @@
|
||||
from sympy.concrete.summations import Sum
|
||||
from sympy.core.containers import (Dict, Tuple)
|
||||
from sympy.core.function import Function
|
||||
from sympy.core.numbers import (I, Rational, nan)
|
||||
from sympy.core.relational import Eq
|
||||
from sympy.core.singleton import S
|
||||
from sympy.core.symbol import (Dummy, Symbol, symbols)
|
||||
from sympy.core.sympify import sympify
|
||||
from sympy.functions.combinatorial.factorials import binomial
|
||||
from sympy.functions.combinatorial.numbers import harmonic
|
||||
from sympy.functions.elementary.exponential import exp
|
||||
from sympy.functions.elementary.miscellaneous import sqrt
|
||||
from sympy.functions.elementary.piecewise import Piecewise
|
||||
from sympy.functions.elementary.trigonometric import cos
|
||||
from sympy.functions.special.beta_functions import beta
|
||||
from sympy.logic.boolalg import (And, Or)
|
||||
from sympy.polys.polytools import cancel
|
||||
from sympy.sets.sets import FiniteSet
|
||||
from sympy.simplify.simplify import simplify
|
||||
from sympy.matrices import Matrix
|
||||
from sympy.stats import (DiscreteUniform, Die, Bernoulli, Coin, Binomial, BetaBinomial,
|
||||
Hypergeometric, Rademacher, IdealSoliton, RobustSoliton, P, E, variance,
|
||||
covariance, skewness, density, where, FiniteRV, pspace, cdf,
|
||||
correlation, moment, cmoment, smoment, characteristic_function,
|
||||
moment_generating_function, quantile, kurtosis, median, coskewness)
|
||||
from sympy.stats.frv_types import DieDistribution, BinomialDistribution, \
|
||||
HypergeometricDistribution
|
||||
from sympy.stats.rv import Density
|
||||
from sympy.testing.pytest import raises
|
||||
|
||||
|
||||
def BayesTest(A, B):
    """Assert the defining identities of conditional probability for A, B."""
    # definition: P(A|B) = P(A and B) / P(B)
    assert P(A, B) == P(And(A, B)) / P(B)
    # Bayes' rule: P(A|B) = P(B|A) * P(A) / P(B)
    assert P(A, B) == P(B, A) * P(A) / P(B)
|
||||
|
||||
|
||||
def test_discreteuniform():
|
||||
# Symbolic
|
||||
a, b, c, t = symbols('a b c t')
|
||||
X = DiscreteUniform('X', [a, b, c])
|
||||
|
||||
assert E(X) == (a + b + c)/3
|
||||
assert simplify(variance(X)
|
||||
- ((a**2 + b**2 + c**2)/3 - (a/3 + b/3 + c/3)**2)) == 0
|
||||
assert P(Eq(X, a)) == P(Eq(X, b)) == P(Eq(X, c)) == S('1/3')
|
||||
|
||||
Y = DiscreteUniform('Y', range(-5, 5))
|
||||
|
||||
# Numeric
|
||||
assert E(Y) == S('-1/2')
|
||||
assert variance(Y) == S('33/4')
|
||||
assert median(Y) == FiniteSet(-1, 0)
|
||||
|
||||
for x in range(-5, 5):
|
||||
assert P(Eq(Y, x)) == S('1/10')
|
||||
assert P(Y <= x) == S(x + 6)/10
|
||||
assert P(Y >= x) == S(5 - x)/10
|
||||
|
||||
assert dict(density(Die('D', 6)).items()) == \
|
||||
dict(density(DiscreteUniform('U', range(1, 7))).items())
|
||||
|
||||
assert characteristic_function(X)(t) == exp(I*a*t)/3 + exp(I*b*t)/3 + exp(I*c*t)/3
|
||||
assert moment_generating_function(X)(t) == exp(a*t)/3 + exp(b*t)/3 + exp(c*t)/3
|
||||
# issue 18611
|
||||
raises(ValueError, lambda: DiscreteUniform('Z', [a, a, a, b, b, c]))
|
||||
|
||||
def test_dice():
|
||||
# TODO: Make iid method!
|
||||
X, Y, Z = Die('X', 6), Die('Y', 6), Die('Z', 6)
|
||||
a, b, t, p = symbols('a b t p')
|
||||
|
||||
assert E(X) == 3 + S.Half
|
||||
assert variance(X) == Rational(35, 12)
|
||||
assert E(X + Y) == 7
|
||||
assert E(X + X) == 7
|
||||
assert E(a*X + b) == a*E(X) + b
|
||||
assert variance(X + Y) == variance(X) + variance(Y) == cmoment(X + Y, 2)
|
||||
assert variance(X + X) == 4 * variance(X) == cmoment(X + X, 2)
|
||||
assert cmoment(X, 0) == 1
|
||||
assert cmoment(4*X, 3) == 64*cmoment(X, 3)
|
||||
assert covariance(X, Y) is S.Zero
|
||||
assert covariance(X, X + Y) == variance(X)
|
||||
assert density(Eq(cos(X*S.Pi), 1))[True] == S.Half
|
||||
assert correlation(X, Y) == 0
|
||||
assert correlation(X, Y) == correlation(Y, X)
|
||||
assert smoment(X + Y, 3) == skewness(X + Y)
|
||||
assert smoment(X + Y, 4) == kurtosis(X + Y)
|
||||
assert smoment(X, 0) == 1
|
||||
assert P(X > 3) == S.Half
|
||||
assert P(2*X > 6) == S.Half
|
||||
assert P(X > Y) == Rational(5, 12)
|
||||
assert P(Eq(X, Y)) == P(Eq(X, 1))
|
||||
|
||||
assert E(X, X > 3) == 5 == moment(X, 1, 0, X > 3)
|
||||
assert E(X, Y > 3) == E(X) == moment(X, 1, 0, Y > 3)
|
||||
assert E(X + Y, Eq(X, Y)) == E(2*X)
|
||||
assert moment(X, 0) == 1
|
||||
assert moment(5*X, 2) == 25*moment(X, 2)
|
||||
assert quantile(X)(p) == Piecewise((nan, (p > 1) | (p < 0)),\
|
||||
(S.One, p <= Rational(1, 6)), (S(2), p <= Rational(1, 3)), (S(3), p <= S.Half),\
|
||||
(S(4), p <= Rational(2, 3)), (S(5), p <= Rational(5, 6)), (S(6), p <= 1))
|
||||
|
||||
assert P(X > 3, X > 3) is S.One
|
||||
assert P(X > Y, Eq(Y, 6)) is S.Zero
|
||||
assert P(Eq(X + Y, 12)) == Rational(1, 36)
|
||||
assert P(Eq(X + Y, 12), Eq(X, 6)) == Rational(1, 6)
|
||||
|
||||
assert density(X + Y) == density(Y + Z) != density(X + X)
|
||||
d = density(2*X + Y**Z)
|
||||
assert d[S(22)] == Rational(1, 108) and d[S(4100)] == Rational(1, 216) and S(3130) not in d
|
||||
|
||||
assert pspace(X).domain.as_boolean() == Or(
|
||||
*[Eq(X.symbol, i) for i in [1, 2, 3, 4, 5, 6]])
|
||||
|
||||
assert where(X > 3).set == FiniteSet(4, 5, 6)
|
||||
|
||||
assert characteristic_function(X)(t) == exp(6*I*t)/6 + exp(5*I*t)/6 + exp(4*I*t)/6 + exp(3*I*t)/6 + exp(2*I*t)/6 + exp(I*t)/6
|
||||
assert moment_generating_function(X)(t) == exp(6*t)/6 + exp(5*t)/6 + exp(4*t)/6 + exp(3*t)/6 + exp(2*t)/6 + exp(t)/6
|
||||
assert median(X) == FiniteSet(3, 4)
|
||||
D = Die('D', 7)
|
||||
assert median(D) == FiniteSet(4)
|
||||
# Bayes test for die
|
||||
BayesTest(X > 3, X + Y < 5)
|
||||
BayesTest(Eq(X - Y, Z), Z > Y)
|
||||
BayesTest(X > 3, X > 2)
|
||||
|
||||
# arg test for die
|
||||
raises(ValueError, lambda: Die('X', -1)) # issue 8105: negative sides.
|
||||
raises(ValueError, lambda: Die('X', 0))
|
||||
raises(ValueError, lambda: Die('X', 1.5)) # issue 8103: non integer sides.
|
||||
|
||||
# symbolic test for die
|
||||
n, k = symbols('n, k', positive=True)
|
||||
D = Die('D', n)
|
||||
dens = density(D).dict
|
||||
assert dens == Density(DieDistribution(n))
|
||||
assert set(dens.subs(n, 4).doit().keys()) == {1, 2, 3, 4}
|
||||
assert set(dens.subs(n, 4).doit().values()) == {Rational(1, 4)}
|
||||
k = Dummy('k', integer=True)
|
||||
assert E(D).dummy_eq(
|
||||
Sum(Piecewise((k/n, k <= n), (0, True)), (k, 1, n)))
|
||||
assert variance(D).subs(n, 6).doit() == Rational(35, 12)
|
||||
|
||||
ki = Dummy('ki')
|
||||
cumuf = cdf(D)(k)
|
||||
assert cumuf.dummy_eq(
|
||||
Sum(Piecewise((1/n, (ki >= 1) & (ki <= n)), (0, True)), (ki, 1, k)))
|
||||
assert cumuf.subs({n: 6, k: 2}).doit() == Rational(1, 3)
|
||||
|
||||
t = Dummy('t')
|
||||
cf = characteristic_function(D)(t)
|
||||
assert cf.dummy_eq(
|
||||
Sum(Piecewise((exp(ki*I*t)/n, (ki >= 1) & (ki <= n)), (0, True)), (ki, 1, n)))
|
||||
assert cf.subs(n, 3).doit() == exp(3*I*t)/3 + exp(2*I*t)/3 + exp(I*t)/3
|
||||
mgf = moment_generating_function(D)(t)
|
||||
assert mgf.dummy_eq(
|
||||
Sum(Piecewise((exp(ki*t)/n, (ki >= 1) & (ki <= n)), (0, True)), (ki, 1, n)))
|
||||
assert mgf.subs(n, 3).doit() == exp(3*t)/3 + exp(2*t)/3 + exp(t)/3
|
||||
|
||||
def test_given():
    """Conditioning a die on an event restricts its support."""
    roll = Die('X', 6)
    # given roll > 5 only the face 6 remains, with probability 1
    assert density(roll, roll > 5) == {S(6): S.One}
    assert where(roll > 2, roll > 5).as_boolean() == Eq(roll.symbol, 6)
|
||||
|
||||
|
||||
def test_domains():
|
||||
X, Y = Die('x', 6), Die('y', 6)
|
||||
x, y = X.symbol, Y.symbol
|
||||
# Domains
|
||||
d = where(X > Y)
|
||||
assert d.condition == (x > y)
|
||||
d = where(And(X > Y, Y > 3))
|
||||
assert d.as_boolean() == Or(And(Eq(x, 5), Eq(y, 4)), And(Eq(x, 6),
|
||||
Eq(y, 5)), And(Eq(x, 6), Eq(y, 4)))
|
||||
assert len(d.elements) == 3
|
||||
|
||||
assert len(pspace(X + Y).domain.elements) == 36
|
||||
|
||||
Z = Die('x', 4)
|
||||
|
||||
raises(ValueError, lambda: P(X > Z)) # Two domains with same internal symbol
|
||||
|
||||
assert pspace(X + Y).domain.set == FiniteSet(1, 2, 3, 4, 5, 6)**2
|
||||
|
||||
assert where(X > 3).set == FiniteSet(4, 5, 6)
|
||||
assert X.pspace.domain.dict == FiniteSet(
|
||||
*[Dict({X.symbol: i}) for i in range(1, 7)])
|
||||
|
||||
assert where(X > Y).dict == FiniteSet(*[Dict({X.symbol: i, Y.symbol: j})
|
||||
for i in range(1, 7) for j in range(1, 7) if i > j])
|
||||
|
||||
def test_bernoulli():
|
||||
p, a, b, t = symbols('p a b t')
|
||||
X = Bernoulli('B', p, a, b)
|
||||
|
||||
assert E(X) == a*p + b*(-p + 1)
|
||||
assert density(X)[a] == p
|
||||
assert density(X)[b] == 1 - p
|
||||
assert characteristic_function(X)(t) == p * exp(I * a * t) + (-p + 1) * exp(I * b * t)
|
||||
assert moment_generating_function(X)(t) == p * exp(a * t) + (-p + 1) * exp(b * t)
|
||||
|
||||
X = Bernoulli('B', p, 1, 0)
|
||||
z = Symbol("z")
|
||||
|
||||
assert E(X) == p
|
||||
assert simplify(variance(X)) == p*(1 - p)
|
||||
assert E(a*X + b) == a*E(X) + b
|
||||
assert simplify(variance(a*X + b)) == simplify(a**2 * variance(X))
|
||||
assert quantile(X)(z) == Piecewise((nan, (z > 1) | (z < 0)), (0, z <= 1 - p), (1, z <= 1))
|
||||
Y = Bernoulli('Y', Rational(1, 2))
|
||||
assert median(Y) == FiniteSet(0, 1)
|
||||
Z = Bernoulli('Z', Rational(2, 3))
|
||||
assert median(Z) == FiniteSet(1)
|
||||
raises(ValueError, lambda: Bernoulli('B', 1.5))
|
||||
raises(ValueError, lambda: Bernoulli('B', -0.5))
|
||||
|
||||
#issue 8248
|
||||
assert X.pspace.compute_expectation(1) == 1
|
||||
|
||||
p = Rational(1, 5)
|
||||
X = Binomial('X', 5, p)
|
||||
Y = Binomial('Y', 7, 2*p)
|
||||
Z = Binomial('Z', 9, 3*p)
|
||||
assert coskewness(Y + Z, X + Y, X + Z).simplify() == 0
|
||||
assert coskewness(Y + 2*X + Z, X + 2*Y + Z, X + 2*Z + Y).simplify() == \
|
||||
sqrt(1529)*Rational(12, 16819)
|
||||
assert coskewness(Y + 2*X + Z, X + 2*Y + Z, X + 2*Z + Y, X < 2).simplify() \
|
||||
== -sqrt(357451121)*Rational(2812, 4646864573)
|
||||
|
||||
def test_cdf():
    """cdf of a fair six-sided die is k/6 at each face k."""
    D = Die('D', 6)
    expected = sympify({k: S(k)/6 for k in range(1, 7)})
    assert cdf(D) == expected
|
||||
|
||||
|
||||
def test_coins():
    """Fair and biased coins: joint density, bias, domain, comparisons."""
    C, D = Coin('C'), Coin('D')
    H, T = symbols('H, T')
    assert P(Eq(C, D)) == S.Half
    # two independent fair coins: each joint outcome has probability 1/4
    quarter = Rational(1, 4)
    assert density(Tuple(C, D)) == {(H, H): quarter, (H, T): quarter,
                                    (T, H): quarter, (T, T): quarter}
    assert dict(density(C).items()) == {H: S.Half, T: S.Half}

    # a biased coin: P(heads) given explicitly
    F = Coin('F', Rational(1, 10))
    assert P(Eq(F, H)) == Rational(1, 10)

    d = pspace(C).domain
    assert d.as_boolean() == Or(Eq(C.symbol, H), Eq(C.symbol, T))

    # H and T carry no ordering, so comparing coins must fail
    raises(ValueError, lambda: P(C > D))
|
||||
|
||||
def test_binomial_verify_parameters():
    """Binomial rejects a non-integer n and a p outside [0, 1]."""
    raises(ValueError, lambda: Binomial('b', .2, .5))
    raises(ValueError, lambda: Binomial('b', 3, 1.5))
|
||||
|
||||
def test_binomial_numeric():
    """Moments and pmf of Binomial(n, p) over a grid of small numeric n and p."""
    nvals = range(5)
    pvals = [0, Rational(1, 4), S.Half, Rational(3, 4), 1]

    for n in nvals:
        for p in pvals:
            X = Binomial('X', n, p)
            assert E(X) == n*p
            assert variance(X) == n*p*(1 - p)
            # skewness/kurtosis are only defined for nondegenerate distributions
            if n > 0 and 0 < p < 1:
                assert skewness(X) == (1 - 2*p)/sqrt(n*p*(1 - p))
                assert kurtosis(X) == 3 + (1 - 6*p*(1 - p))/(n*p*(1 - p))
            for k in range(n + 1):
                assert P(Eq(X, k)) == binomial(n, k)*p**k*(1 - p)**(n - k)
|
||||
|
||||
def test_binomial_quantile():
    """Quantile function and median of Binomial, numeric and symbolic probability."""
    X = Binomial('X', 50, S.Half)
    assert quantile(X)(0.95) == S(31)
    assert median(X) == FiniteSet(25)

    X = Binomial('X', 5, S.Half)
    p = Symbol("p", positive=True)
    assert quantile(X)(p) == Piecewise((nan, p > S.One), (S.Zero, p <= Rational(1, 32)),
        (S.One, p <= Rational(3, 16)), (S(2), p <= S.Half), (S(3), p <= Rational(13, 16)),
        (S(4), p <= Rational(31, 32)), (S(5), p <= S.One))
    assert median(X) == FiniteSet(2, 3)
|
||||
|
||||
|
||||
def test_binomial_symbolic():
    """Binomial with symbolic p (and symbolic n): moments, MGF/CF, custom outcomes."""
    n = 2
    p = symbols('p', positive=True)
    X = Binomial('X', n, p)
    t = Symbol('t')

    assert simplify(E(X)) == n*p == simplify(moment(X, 1))
    assert simplify(variance(X)) == n*p*(1 - p) == simplify(cmoment(X, 2))
    assert cancel(skewness(X) - (1 - 2*p)/sqrt(n*p*(1 - p))) == 0
    assert cancel((kurtosis(X)) - (3 + (1 - 6*p*(1 - p))/(n*p*(1 - p)))) == 0
    assert characteristic_function(X)(t) == p ** 2 * exp(2 * I * t) + 2 * p * (-p + 1) * exp(I * t) + (-p + 1) ** 2
    assert moment_generating_function(X)(t) == p ** 2 * exp(2 * t) + 2 * p * (-p + 1) * exp(t) + (-p + 1) ** 2

    # Test ability to change success/failure winnings
    H, T = symbols('H T')
    Y = Binomial('Y', n, p, succ=H, fail=T)
    assert simplify(E(Y) - (n*(H*p + T*(1 - p)))) == 0

    # test symbolic dimensions
    n = symbols('n')
    B = Binomial('B', n, p)
    raises(NotImplementedError, lambda: P(B > 2))
    assert density(B).dict == Density(BinomialDistribution(n, p, 1, 0))
    assert set(density(B).dict.subs(n, 4).doit().keys()) == \
        {S.Zero, S.One, S(2), S(3), S(4)}
    assert set(density(B).dict.subs(n, 4).doit().values()) == \
        {(1 - p)**4, 4*p*(1 - p)**3, 6*p**2*(1 - p)**2, 4*p**3*(1 - p), p**4}
    k = Dummy('k', integer=True)
    assert E(B > 2).dummy_eq(
        Sum(Piecewise((k*p**k*(1 - p)**(-k + n)*binomial(n, k), (k >= 0)
            & (k <= n) & (k > 2)), (0, True)), (k, 0, n)))
|
||||
|
||||
def test_beta_binomial():
    """BetaBinomial: parameter validation, numeric moment grid, symbolic MGF/CF."""
    # verify parameters
    raises(ValueError, lambda: BetaBinomial('b', .2, 1, 2))
    raises(ValueError, lambda: BetaBinomial('b', 2, -1, 2))
    raises(ValueError, lambda: BetaBinomial('b', 2, 1, -2))
    assert BetaBinomial('b', 2, 1, 1)

    # test numeric values
    nvals = range(1, 5)
    alphavals = [Rational(1, 4), S.Half, Rational(3, 4), 1, 10]
    betavals = [Rational(1, 4), S.Half, Rational(3, 4), 1, 10]

    for n in nvals:
        for a in alphavals:
            for b in betavals:
                X = BetaBinomial('X', n, a, b)
                assert E(X) == moment(X, 1)
                assert variance(X) == cmoment(X, 2)

    # test symbolic
    # fixed: the names previously bound mismatched symbols ('a b n');
    # only symbolic construction is exercised, so the fix is behavior-neutral
    n, a, b = symbols('n a b')
    assert BetaBinomial('x', n, a, b)
    n = 2  # Because we're using for loops, can't do symbolic n
    a, b = symbols('a b', positive=True)
    X = BetaBinomial('X', n, a, b)
    t = Symbol('t')

    assert E(X).expand() == moment(X, 1).expand()
    assert variance(X).expand() == cmoment(X, 2).expand()
    assert skewness(X) == smoment(X, 3)
    assert characteristic_function(X)(t) == exp(2*I*t)*beta(a + 2, b)/beta(a, b) +\
        2*exp(I*t)*beta(a + 1, b + 1)/beta(a, b) + beta(a, b + 2)/beta(a, b)
    assert moment_generating_function(X)(t) == exp(2*t)*beta(a + 2, b)/beta(a, b) +\
        2*exp(t)*beta(a + 1, b + 1)/beta(a, b) + beta(a, b + 2)/beta(a, b)
|
||||
|
||||
def test_hypergeometric_numeric():
    """Hypergeometric(N, m, n) over a small numeric grid: normalization and moments."""
    for N in range(1, 5):
        for m in range(0, N + 1):
            for n in range(1, N + 1):
                X = Hypergeometric('X', N, m, n)
                N, m, n = map(sympify, (N, m, n))
                assert sum(density(X).values()) == 1
                assert E(X) == n * m / N
                if N > 1:
                    assert variance(X) == n*(m/N)*(N - m)/N*(N - n)/(N - 1)
                # Only test for skewness when defined
                if N > 2 and 0 < m < N and n < N:
                    assert skewness(X) == simplify((N - 2*m)*sqrt(N - 1)*(N - 2*n)
                        / (sqrt(n*m*(N - m)*(N - n))*(N - 2)))
|
||||
|
||||
def test_hypergeometric_symbolic():
    """Hypergeometric with fully symbolic parameters: density dict and expectation Sum."""
    N, m, n = symbols('N, m, n')
    H = Hypergeometric('H', N, m, n)
    dens = density(H).dict
    expec = E(H > 2)
    assert dens == Density(HypergeometricDistribution(N, m, n))
    assert dens.subs(N, 5).doit() == Density(HypergeometricDistribution(5, m, n))
    assert set(dens.subs({N: 3, m: 2, n: 1}).doit().keys()) == {S.Zero, S.One}
    assert set(dens.subs({N: 3, m: 2, n: 1}).doit().values()) == {Rational(1, 3), Rational(2, 3)}
    k = Dummy('k', integer=True)
    assert expec.dummy_eq(
        Sum(Piecewise((k*binomial(m, k)*binomial(N - m, -k + n)
            /binomial(N, n), k > 2), (0, True)), (k, 0, n)))
|
||||
|
||||
def test_rademacher():
    """Rademacher RV: +/-1 with equal probability; moments, density, CF and MGF."""
    X = Rademacher('X')
    t = Symbol('t')

    assert E(X) == 0
    assert variance(X) == 1
    assert density(X)[-1] == S.Half
    assert density(X)[1] == S.Half
    assert characteristic_function(X)(t) == exp(I*t)/2 + exp(-I*t)/2
    assert moment_generating_function(X)(t) == exp(t) / 2 + exp(-t) / 2
|
||||
|
||||
def test_ideal_soliton():
    """IdealSoliton: parameter validation, pmf shape, and moments for several k."""
    raises(ValueError, lambda : IdealSoliton('sol', -12))
    raises(ValueError, lambda : IdealSoliton('sol', 13.2))
    raises(ValueError, lambda : IdealSoliton('sol', 0))
    f = Function('f')
    raises(ValueError, lambda : density(IdealSoliton('sol', 10)).pmf(f))

    k = Symbol('k', integer=True, positive=True)
    x = Symbol('x', integer=True, positive=True)
    t = Symbol('t')
    sol = IdealSoliton('sol', k)
    assert density(sol).low == S.One
    assert density(sol).high == k
    assert density(sol).dict == Density(density(sol))
    assert density(sol).pmf(x) == Piecewise((1/k, Eq(x, 1)), (1/(x*(x - 1)), k >= x), (0, True))

    k_vals = [5, 20, 50, 100, 1000]
    for i in k_vals:
        assert E(sol.subs(k, i)) == harmonic(i) == moment(sol.subs(k, i), 1)
        assert variance(sol.subs(k, i)) == (i - 1) + harmonic(i) - harmonic(i)**2 == cmoment(sol.subs(k, i),2)
        assert skewness(sol.subs(k, i)) == smoment(sol.subs(k, i), 3)
        assert kurtosis(sol.subs(k, i)) == smoment(sol.subs(k, i), 4)

    assert exp(I*t)/10 + Sum(exp(I*t*x)/(x*x - x), (x, 2, k)).subs(k, 10).doit() == characteristic_function(sol.subs(k, 10))(t)
    assert exp(t)/10 + Sum(exp(t*x)/(x*x - x), (x, 2, k)).subs(k, 10).doit() == moment_generating_function(sol.subs(k, 10))(t)
|
||||
|
||||
def test_robust_soliton():
    """RobustSoliton: parameter validation and moment identities over a grid."""
    raises(ValueError, lambda : RobustSoliton('robSol', -12, 0.1, 0.02))
    raises(ValueError, lambda : RobustSoliton('robSol', 13, 1.89, 0.1))
    raises(ValueError, lambda : RobustSoliton('robSol', 15, 0.6, -2.31))
    f = Function('f')
    raises(ValueError, lambda : density(RobustSoliton('robSol', 15, 0.6, 0.1)).pmf(f))

    k = Symbol('k', integer=True, positive=True)
    delta = Symbol('delta', positive=True)
    c = Symbol('c', positive=True)
    robSol = RobustSoliton('robSol', k, delta, c)
    assert density(robSol).low == 1
    assert density(robSol).high == k

    k_vals = [10, 20, 50]
    delta_vals = [0.2, 0.4, 0.6]
    c_vals = [0.01, 0.03, 0.05]
    for x in k_vals:
        for y in delta_vals:
            for z in c_vals:
                assert E(robSol.subs({k: x, delta: y, c: z})) == moment(robSol.subs({k: x, delta: y, c: z}), 1)
                assert variance(robSol.subs({k: x, delta: y, c: z})) == cmoment(robSol.subs({k: x, delta: y, c: z}), 2)
                assert skewness(robSol.subs({k: x, delta: y, c: z})) == smoment(robSol.subs({k: x, delta: y, c: z}), 3)
                assert kurtosis(robSol.subs({k: x, delta: y, c: z})) == smoment(robSol.subs({k: x, delta: y, c: z}), 4)
|
||||
|
||||
def test_FiniteRV():
    """FiniteRV: density, probability, quantile, domain, and pmf validation."""
    F = FiniteRV('F', {1: S.Half, 2: Rational(1, 4), 3: Rational(1, 4)}, check=True)
    p = Symbol("p", positive=True)

    assert dict(density(F).items()) == {S.One: S.Half, S(2): Rational(1, 4), S(3): Rational(1, 4)}
    assert P(F >= 2) == S.Half
    assert quantile(F)(p) == Piecewise((nan, p > S.One), (S.One, p <= S.Half),
        (S(2), p <= Rational(3, 4)), (S(3), True))

    assert pspace(F).domain.as_boolean() == Or(
        *[Eq(F.symbol, i) for i in [1, 2, 3]])

    assert F.pspace.domain.set == FiniteSet(1, 2, 3)
    raises(ValueError, lambda: FiniteRV('F', {1: S.Half, 2: S.Half, 3: S.Half}, check=True))
    raises(ValueError, lambda: FiniteRV('F', {1: S.Half, 2: Rational(-1, 2), 3: S.One}, check=True))
    raises(ValueError, lambda: FiniteRV('F', {1: S.One, 2: Rational(3, 2), 3: S.Zero,
        4: Rational(-1, 2), 5: Rational(-3, 4), 6: Rational(-1, 4)}, check=True))

    # purposeful invalid pmf but it should not raise since check=False
    # see test_drv_types.test_ContinuousRV for explanation
    X = FiniteRV('X', {1: 1, 2: 2})
    assert E(X) == 5
    assert P(X <= 2) + P(X > 2) != 1
|
||||
|
||||
def test_density_call():
    """density(...) supports call, membership, and item access interchangeably."""
    from sympy.abc import p
    x = Bernoulli('x', p)
    d = density(x)
    assert d(0) == 1 - p
    assert d(S.Zero) == 1 - p
    assert d(5) == 0

    assert 0 in d
    assert 5 not in d
    assert d(S.Zero) == d[S.Zero]
|
||||
|
||||
|
||||
def test_DieDistribution():
    """DieDistribution.pmf: zero off the support, 1/6 on faces, rejects bad inputs."""
    from sympy.abc import x
    X = DieDistribution(6)
    assert X.pmf(S.Half) is S.Zero
    assert X.pmf(x).subs({x: 1}).doit() == Rational(1, 6)
    assert X.pmf(x).subs({x: 7}).doit() == 0
    assert X.pmf(x).subs({x: -1}).doit() == 0
    assert X.pmf(x).subs({x: Rational(1, 3)}).doit() == 0
    raises(ValueError, lambda: X.pmf(Matrix([0, 0])))
    raises(ValueError, lambda: X.pmf(x**2 - 1))
|
||||
|
||||
def test_FinitePSpace():
    """The pspace of a Die RV carries the matching DieDistribution."""
    X = Die('X', 6)
    space = pspace(X)
    assert space.density == DieDistribution(6)
|
||||
|
||||
def test_symbolic_conditions():
    """P and E with symbolic conditions return Piecewise sums over outcomes."""
    B = Bernoulli('B', Rational(1, 4))
    D = Die('D', 4)
    b, n = symbols('b, n')
    Y = P(Eq(B, b))
    Z = E(D > n)
    assert Y == \
        Piecewise((Rational(1, 4), Eq(b, 1)), (0, True)) + \
        Piecewise((Rational(3, 4), Eq(b, 0)), (0, True))
    assert Z == \
        Piecewise((Rational(1, 4), n < 1), (0, True)) + Piecewise((S.Half, n < 2), (0, True)) + \
        Piecewise((Rational(3, 4), n < 3), (0, True)) + Piecewise((S.One, n < 4), (0, True))
|
||||
@@ -0,0 +1,436 @@
|
||||
from sympy.concrete.products import Product
|
||||
from sympy.concrete.summations import Sum
|
||||
from sympy.core.numbers import (Rational, oo, pi)
|
||||
from sympy.core.relational import Eq
|
||||
from sympy.core.singleton import S
|
||||
from sympy.core.symbol import symbols
|
||||
from sympy.functions.combinatorial.factorials import (RisingFactorial, factorial)
|
||||
from sympy.functions.elementary.complexes import polar_lift
|
||||
from sympy.functions.elementary.exponential import exp
|
||||
from sympy.functions.elementary.miscellaneous import sqrt
|
||||
from sympy.functions.elementary.piecewise import Piecewise
|
||||
from sympy.functions.special.bessel import besselk
|
||||
from sympy.functions.special.gamma_functions import gamma
|
||||
from sympy.matrices.dense import eye
|
||||
from sympy.matrices.expressions.determinant import Determinant
|
||||
from sympy.sets.fancysets import Range
|
||||
from sympy.sets.sets import (Interval, ProductSet)
|
||||
from sympy.simplify.simplify import simplify
|
||||
from sympy.tensor.indexed import (Indexed, IndexedBase)
|
||||
from sympy.core.numbers import comp
|
||||
from sympy.integrals.integrals import integrate
|
||||
from sympy.matrices import Matrix, MatrixSymbol
|
||||
from sympy.matrices.expressions.matexpr import MatrixElement
|
||||
from sympy.stats import density, median, marginal_distribution, Normal, Laplace, E, sample
|
||||
from sympy.stats.joint_rv_types import (JointRV, MultivariateNormalDistribution,
|
||||
JointDistributionHandmade, MultivariateT, NormalGamma,
|
||||
GeneralizedMultivariateLogGammaOmega as GMVLGO, MultivariateBeta,
|
||||
GeneralizedMultivariateLogGamma as GMVLG, MultivariateEwens,
|
||||
Multinomial, NegativeMultinomial, MultivariateNormal,
|
||||
MultivariateLaplace)
|
||||
from sympy.testing.pytest import raises, XFAIL, skip, slow
|
||||
from sympy.external import import_module
|
||||
|
||||
from sympy.abc import x, y
|
||||
|
||||
|
||||
|
||||
def test_Normal():
    """Multivariate Normal: density, marginals, validation, symbolic mu/sigma."""
    m = Normal('A', [1, 2], [[1, 0], [0, 1]])
    A = MultivariateNormal('A', [1, 2], [[1, 0], [0, 1]])
    assert m == A
    assert density(m)(1, 2) == 1/(2*pi)
    assert m.pspace.distribution.set == ProductSet(S.Reals, S.Reals)
    raises(ValueError, lambda: m[2])
    n = Normal('B', [1, 2, 3], [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    p = Normal('C', Matrix([1, 2]), Matrix([[1, 0], [0, 1]]))
    assert density(m)(x, y) == density(p)(x, y)
    assert marginal_distribution(n, 0, 1)(1, 2) == 1/(2*pi)
    raises(ValueError, lambda: marginal_distribution(m))
    assert integrate(density(m)(x, y), (x, -oo, oo), (y, -oo, oo)).evalf() == 1.0
    N = Normal('N', [1, 2], [[x, 0], [0, y]])
    assert density(N)(0, 0) == exp(-((4*x + y)/(2*x*y)))/(2*pi*sqrt(x*y))

    # a covariance matrix must be positive semidefinite
    raises(ValueError, lambda: Normal('M', [1, 2], [[1, 1], [1, -1]]))
    # symbolic
    n = symbols('n', integer=True, positive=True)
    mu = MatrixSymbol('mu', n, 1)
    sigma = MatrixSymbol('sigma', n, n)
    X = Normal('X', mu, sigma)
    assert density(X) == MultivariateNormalDistribution(mu, sigma)
    raises(NotImplementedError, lambda: median(m))
    # Below tests should work after issue #17267 is resolved
    # assert E(X) == mu
    # assert variance(X) == sigma

    # test symbolic multivariate normal densities
    n = 3

    Sg = MatrixSymbol('Sg', n, n)
    mu = MatrixSymbol('mu', n, 1)
    obs = MatrixSymbol('obs', n, 1)

    X = MultivariateNormal('X', mu, Sg)
    density_X = density(X)

    eval_a = density_X(obs).subs({Sg: eye(3),
        mu: Matrix([0, 0, 0]), obs: Matrix([0, 0, 0])}).doit()
    eval_b = density_X(0, 0, 0).subs({Sg: eye(3), mu: Matrix([0, 0, 0])}).doit()

    assert eval_a == sqrt(2)/(4*pi**Rational(3/2))
    assert eval_b == sqrt(2)/(4*pi**Rational(3/2))

    n = symbols('n', integer=True, positive=True)

    Sg = MatrixSymbol('Sg', n, n)
    mu = MatrixSymbol('mu', n, 1)
    obs = MatrixSymbol('obs', n, 1)

    X = MultivariateNormal('X', mu, Sg)
    density_X_at_obs = density(X)(obs)

    expected_density = MatrixElement(
        exp((S(1)/2) * (mu.T - obs.T) * Sg**(-1) * (-mu + obs)) / \
        sqrt((2*pi)**n * Determinant(Sg)), 0, 0)

    assert density_X_at_obs == expected_density
|
||||
|
||||
|
||||
def test_MultivariateTDist():
    """MultivariateT: density values, normalization, and covariance validation."""
    t1 = MultivariateT('T', [0, 0], [[1, 0], [0, 1]], 2)
    assert (density(t1))(1, 1) == 1/(8*pi)
    assert t1.pspace.distribution.set == ProductSet(S.Reals, S.Reals)
    assert integrate(density(t1)(x, y), (x, -oo, oo),
                     (y, -oo, oo)).evalf() == 1.0
    raises(ValueError, lambda: MultivariateT('T', [1, 2], [[1, 1], [1, -1]], 1))
    t2 = MultivariateT('t2', [1, 2], [[x, 0], [0, y]], 1)
    assert density(t2)(1, 2) == 1/(2*pi*sqrt(x*y))
|
||||
|
||||
|
||||
def test_multivariate_laplace():
    """Multivariate Laplace: validation, Bessel-K densities, alias equivalence."""
    raises(ValueError, lambda: Laplace('T', [1, 2], [[1, 2], [2, 1]]))
    L = Laplace('L', [1, 0], [[1, 0], [0, 1]])
    L2 = MultivariateLaplace('L2', [1, 0], [[1, 0], [0, 1]])
    assert density(L)(2, 3) == exp(2)*besselk(0, sqrt(39))/pi
    L1 = Laplace('L1', [1, 2], [[x, 0], [0, y]])
    assert density(L1)(0, 1) == \
        exp(2/y)*besselk(0, sqrt((2 + 4/y + 1/x)/y))/(pi*sqrt(x*y))
    assert L.pspace.distribution.set == ProductSet(S.Reals, S.Reals)
    assert L.pspace.distribution == L2.pspace.distribution
|
||||
|
||||
|
||||
def test_NormalGamma():
    """NormalGamma: joint density, support set, validation, and marginals."""
    ng = NormalGamma('G', 1, 2, 3, 4)
    assert density(ng)(1, 1) == 32*exp(-4)/sqrt(pi)
    assert ng.pspace.distribution.set == ProductSet(S.Reals, Interval(0, oo))
    raises(ValueError, lambda: NormalGamma('G', 1, 2, 3, -1))
    assert marginal_distribution(ng, 0)(1) == \
        3*sqrt(10)*gamma(Rational(7, 4))/(10*sqrt(pi)*gamma(Rational(5, 4)))
    assert marginal_distribution(ng, y)(1) == exp(Rational(-1, 4))/128
    assert marginal_distribution(ng, [0, 1])(x) == x**2*exp(-x/4)/128
|
||||
|
||||
|
||||
def test_GeneralizedMultivariateLogGammaDistribution():
    """GMVLG / GMVLGO: string-form densities, a marginal, and parameter validation."""
    h = S.Half
    omega = Matrix([[1, h, h, h],
                    [h, 1, h, h],
                    [h, h, 1, h],
                    [h, h, h, 1]])
    v, l, mu = (4, [1, 2, 3, 4], [1, 2, 3, 4])
    y_1, y_2, y_3, y_4 = symbols('y_1:5', real=True)
    delta = symbols('d', positive=True)
    G = GMVLGO('G', omega, v, l, mu)
    Gd = GMVLG('Gd', delta, v, l, mu)
    # expected densities are compared as strings because they contain an
    # unevaluated Sum with a dummy summation index
    dend = ("d**4*Sum(4*24**(-n - 4)*(1 - d)**n*exp((n + 4)*(y_1 + 2*y_2 + 3*y_3 "
            "+ 4*y_4) - exp(y_1) - exp(2*y_2)/2 - exp(3*y_3)/3 - exp(4*y_4)/4)/"
            "(gamma(n + 1)*gamma(n + 4)**3), (n, 0, oo))")
    assert str(density(Gd)(y_1, y_2, y_3, y_4)) == dend
    den = ("5*2**(2/3)*5**(1/3)*Sum(4*24**(-n - 4)*(-2**(2/3)*5**(1/3)/4 + 1)**n*"
           "exp((n + 4)*(y_1 + 2*y_2 + 3*y_3 + 4*y_4) - exp(y_1) - exp(2*y_2)/2 - "
           "exp(3*y_3)/3 - exp(4*y_4)/4)/(gamma(n + 1)*gamma(n + 4)**3), (n, 0, oo))/64")
    assert str(density(G)(y_1, y_2, y_3, y_4)) == den
    marg = ("5*2**(2/3)*5**(1/3)*exp(4*y_1)*exp(-exp(y_1))*Integral(exp(-exp(4*G[3])"
            "/4)*exp(16*G[3])*Integral(exp(-exp(3*G[2])/3)*exp(12*G[2])*Integral(exp("
            "-exp(2*G[1])/2)*exp(8*G[1])*Sum((-1/4)**n*(-4 + 2**(2/3)*5**(1/3"
            "))**n*exp(n*y_1)*exp(2*n*G[1])*exp(3*n*G[2])*exp(4*n*G[3])/(24**n*gamma(n + 1)"
            "*gamma(n + 4)**3), (n, 0, oo)), (G[1], -oo, oo)), (G[2], -oo, oo)), (G[3]"
            ", -oo, oo))/5308416")
    assert str(marginal_distribution(G, G[0])(y_1)) == marg
    # invalid parameter shapes/values, each expected to raise
    omega_f1 = Matrix([[1, h, h]])
    omega_f2 = Matrix([[1, h, h, h],
                       [h, 1, 2, h],
                       [h, h, 1, h],
                       [h, h, h, 1]])
    omega_f3 = Matrix([[6, h, h, h],
                       [h, 1, 2, h],
                       [h, h, 1, h],
                       [h, h, h, 1]])
    v_f = symbols("v_f", positive=False, real=True)
    l_f = [1, 2, v_f, 4]
    m_f = [v_f, 2, 3, 4]
    omega_f4 = Matrix([[1, h, h, h, h],
                       [h, 1, h, h, h],
                       [h, h, 1, h, h],
                       [h, h, h, 1, h],
                       [h, h, h, h, 1]])
    l_f1 = [1, 2, 3, 4, 5]
    omega_f5 = Matrix([[1]])
    mu_f5 = l_f5 = [1]

    raises(ValueError, lambda: GMVLGO('G', omega_f1, v, l, mu))
    raises(ValueError, lambda: GMVLGO('G', omega_f2, v, l, mu))
    raises(ValueError, lambda: GMVLGO('G', omega_f3, v, l, mu))
    raises(ValueError, lambda: GMVLGO('G', omega, v_f, l, mu))
    raises(ValueError, lambda: GMVLGO('G', omega, v, l_f, mu))
    raises(ValueError, lambda: GMVLGO('G', omega, v, l, m_f))
    raises(ValueError, lambda: GMVLGO('G', omega_f4, v, l, mu))
    raises(ValueError, lambda: GMVLGO('G', omega, v, l_f1, mu))
    raises(ValueError, lambda: GMVLGO('G', omega_f5, v, l_f5, mu_f5))
    raises(ValueError, lambda: GMVLG('G', Rational(3, 2), v, l, mu))
|
||||
|
||||
|
||||
def test_MultivariateBeta():
    """MultivariateBeta (Dirichlet): density, marginal, and parameter validation."""
    a1, a2 = symbols('a1, a2', positive=True)
    a1_f, a2_f = symbols('a1, a2', positive=False, real=True)
    mb = MultivariateBeta('B', [a1, a2])
    mb_c = MultivariateBeta('C', a1, a2)
    assert density(mb)(1, 2) == S(2)**(a2 - 1)*gamma(a1 + a2)/\
        (gamma(a1)*gamma(a2))
    assert marginal_distribution(mb_c, 0)(3) == S(3)**(a1 - 1)*gamma(a1 + a2)/\
        (a2*gamma(a1)*gamma(a2))
    raises(ValueError, lambda: MultivariateBeta('b1', [a1_f, a2]))
    raises(ValueError, lambda: MultivariateBeta('b2', [a1, a2_f]))
    raises(ValueError, lambda: MultivariateBeta('b3', [0, 0]))
    raises(ValueError, lambda: MultivariateBeta('b4', [a1_f, a2_f]))
    assert mb.pspace.distribution.set == ProductSet(Interval(0, 1), Interval(0, 1))
|
||||
|
||||
|
||||
def test_MultivariateEwens():
    """MultivariateEwens: integer-dimension density/marginal and symbolic-n density."""
    n, theta, i = symbols('n theta i', positive=True)

    # tests for integer dimensions
    theta_f = symbols('t_f', negative=True)
    a = symbols('a_1:4', positive = True, integer = True)
    ed = MultivariateEwens('E', 3, theta)
    assert density(ed)(a[0], a[1], a[2]) == Piecewise((6*2**(-a[1])*3**(-a[2])*
                                            theta**a[0]*theta**a[1]*theta**a[2]/
                                            (theta*(theta + 1)*(theta + 2)*
                                            factorial(a[0])*factorial(a[1])*
                                            factorial(a[2])), Eq(a[0] + 2*a[1] +
                                            3*a[2], 3)), (0, True))
    assert marginal_distribution(ed, ed[1])(a[1]) == Piecewise((6*2**(-a[1])*
                                                    theta**a[1]/((theta + 1)*
                                                    (theta + 2)*factorial(a[1])),
                                                    Eq(2*a[1] + 1, 3)), (0, True))
    raises(ValueError, lambda: MultivariateEwens('e1', 5, theta_f))
    assert ed.pspace.distribution.set == ProductSet(Range(0, 4, 1),
                                                    Range(0, 2, 1), Range(0, 2, 1))

    # tests for symbolic dimensions
    eds = MultivariateEwens('E', n, theta)
    a = IndexedBase('a')
    j, k = symbols('j, k')
    den = Piecewise((factorial(n)*Product(theta**a[j]*(j + 1)**(-a[j])/
                    factorial(a[j]), (j, 0, n - 1))/RisingFactorial(theta, n),
                    Eq(n, Sum((k + 1)*a[k], (k, 0, n - 1)))), (0, True))
    assert density(eds)(a).dummy_eq(den)
|
||||
|
||||
|
||||
def test_Multinomial():
    """Multinomial: density Piecewise, a marginal value, parameter validation."""
    n, x1, x2, x3, x4 = symbols('n, x1, x2, x3, x4', nonnegative=True, integer=True)
    p1, p2, p3, p4 = symbols('p1, p2, p3, p4', positive=True)
    p1_f, n_f = symbols('p1_f, n_f', negative=True)
    M = Multinomial('M', n, [p1, p2, p3, p4])
    C = Multinomial('C', 3, p1, p2, p3)
    f = factorial
    assert density(M)(x1, x2, x3, x4) == Piecewise((p1**x1*p2**x2*p3**x3*p4**x4*
                                            f(n)/(f(x1)*f(x2)*f(x3)*f(x4)),
                                            Eq(n, x1 + x2 + x3 + x4)), (0, True))
    assert marginal_distribution(C, C[0])(x1).subs(x1, 1) ==\
        3*p1*p2**2 +\
        6*p1*p2*p3 +\
        3*p1*p3**2
    raises(ValueError, lambda: Multinomial('b1', 5, [p1, p2, p3, p1_f]))
    raises(ValueError, lambda: Multinomial('b2', n_f, [p1, p2, p3, p4]))
    raises(ValueError, lambda: Multinomial('b3', n, 0.5, 0.4, 0.3, 0.1))
|
||||
|
||||
|
||||
def test_NegativeMultinomial():
    """NegativeMultinomial: density identity, marginal estimate, validation, support."""
    k0, x1, x2, x3, x4 = symbols('k0, x1, x2, x3, x4', nonnegative=True, integer=True)
    p1, p2, p3, p4 = symbols('p1, p2, p3, p4', positive=True)
    p1_f = symbols('p1_f', negative=True)
    N = NegativeMultinomial('N', 4, [p1, p2, p3, p4])
    C = NegativeMultinomial('C', 4, 0.1, 0.2, 0.3)
    g = gamma
    f = factorial
    assert simplify(density(N)(x1, x2, x3, x4) -
            p1**x1*p2**x2*p3**x3*p4**x4*(-p1 - p2 - p3 - p4 + 1)**4*g(x1 + x2 +
            x3 + x4 + 4)/(6*f(x1)*f(x2)*f(x3)*f(x4))) is S.Zero
    assert comp(marginal_distribution(C, C[0])(1).evalf(), 0.33, .01)
    raises(ValueError, lambda: NegativeMultinomial('b1', 5, [p1, p2, p3, p1_f]))
    raises(ValueError, lambda: NegativeMultinomial('b2', k0, 0.5, 0.4, 0.3, 0.4))
    assert N.pspace.distribution.set == ProductSet(Range(0, oo, 1),
                    Range(0, oo, 1), Range(0, oo, 1), Range(0, oo, 1))
|
||||
|
||||
|
||||
@slow
def test_JointPSpace_marginal_distribution():
    """Marginals of MultivariateT: closed form for d=2 and a numeric check for d=3."""
    T = MultivariateT('T', [0, 0], [[1, 0], [0, 1]], 2)
    got = marginal_distribution(T, T[1])(x)
    ans = sqrt(2)*(x**2/2 + 1)/(4*polar_lift(x**2/2 + 1)**(S(5)/2))
    assert got == ans, got
    assert integrate(marginal_distribution(T, 1)(x), (x, -oo, oo)) == 1

    t = MultivariateT('T', [0, 0, 0], [[1, 0, 0], [0, 1, 0], [0, 0, 1]], 3)
    assert comp(marginal_distribution(t, 0)(1).evalf(), 0.2, .01)
|
||||
|
||||
|
||||
def test_JointRV():
    """Handmade joint distribution from a pdf expression: density and marginal."""
    x1, x2 = (Indexed('x', i) for i in (1, 2))
    pdf = exp(-x1**2/2 + x1 - x2**2/2 - S.Half)/(2*pi)
    X = JointRV('x', pdf)
    assert density(X)(1, 2) == exp(-2)/(2*pi)
    assert isinstance(X.pspace.distribution, JointDistributionHandmade)
    assert marginal_distribution(X, 0)(2) == sqrt(2)*exp(Rational(-1, 2))/(2*sqrt(pi))
|
||||
|
||||
|
||||
def test_expectation():
    """Expectation of a single component of a multivariate Normal is its mean."""
    m = Normal('A', [x, y], [[1, 0], [0, 1]])
    assert simplify(E(m[1])) == y
|
||||
|
||||
|
||||
@XFAIL
def test_joint_vector_expectation():
    """Expected failure: vector-valued E of a multivariate RV is not implemented."""
    m = Normal('A', [x, y], [[1, 0], [0, 1]])
    assert E(m) == (x, y)
|
||||
|
||||
|
||||
def test_sample_numpy():
    """Sampling joint distributions through the numpy backend (skipped if absent)."""
    distribs_numpy = [
        MultivariateNormal("M", [3, 4], [[2, 1], [1, 2]]),
        MultivariateBeta("B", [0.4, 5, 15, 50, 203]),
        Multinomial("N", 50, [0.3, 0.2, 0.1, 0.25, 0.15])
    ]
    size = 3
    numpy = import_module('numpy')
    if not numpy:
        skip('Numpy is not installed. Abort tests for _sample_numpy.')
    else:
        for X in distribs_numpy:
            samps = sample(X, size=size, library='numpy')
            for sam in samps:
                assert tuple(sam) in X.pspace.distribution.set
        N_c = NegativeMultinomial('N', 3, 0.1, 0.1, 0.1)
        raises(NotImplementedError, lambda: sample(N_c, library='numpy'))
|
||||
|
||||
|
||||
def test_sample_scipy():
    """Sampling joint distributions through the default scipy backend."""
    distribs_scipy = [
        MultivariateNormal("M", [0, 0], [[0.1, 0.025], [0.025, 0.1]]),
        MultivariateBeta("B", [0.4, 5, 15]),
        Multinomial("N", 8, [0.3, 0.2, 0.1, 0.4])
    ]

    size = 3
    scipy = import_module('scipy')
    if not scipy:
        skip('Scipy not installed. Abort tests for _sample_scipy.')
    else:
        for X in distribs_scipy:
            samps = sample(X, size=size)
            samps2 = sample(X, size=(2, 2))
            for sam in samps:
                assert tuple(sam) in X.pspace.distribution.set
            for i in range(2):
                for j in range(2):
                    assert tuple(samps2[i][j]) in X.pspace.distribution.set
        N_c = NegativeMultinomial('N', 3, 0.1, 0.1, 0.1)
        raises(NotImplementedError, lambda: sample(N_c))
|
||||
|
||||
|
||||
def test_sample_pymc():
    """Sampling joint distributions through the pymc backend (skipped if absent)."""
    distribs_pymc = [
        MultivariateNormal("M", [5, 2], [[1, 0], [0, 1]]),
        MultivariateBeta("B", [0.4, 5, 15]),
        Multinomial("N", 4, [0.3, 0.2, 0.1, 0.4])
    ]
    size = 3
    pymc = import_module('pymc')
    if not pymc:
        skip('PyMC is not installed. Abort tests for _sample_pymc.')
    else:
        for X in distribs_pymc:
            samps = sample(X, size=size, library='pymc')
            for sam in samps:
                assert tuple(sam.flatten()) in X.pspace.distribution.set
        N_c = NegativeMultinomial('N', 3, 0.1, 0.1, 0.1)
        raises(NotImplementedError, lambda: sample(N_c, library='pymc'))
|
||||
|
||||
|
||||
def test_sample_seed():
    """Equal seeds reproduce samples; different seeds give different samples."""
    x1, x2 = (Indexed('x', i) for i in (1, 2))
    pdf = exp(-x1**2/2 + x1 - x2**2/2 - S.Half)/(2*pi)
    X = JointRV('x', pdf)

    libraries = ['scipy', 'numpy', 'pymc']
    for lib in libraries:
        try:
            imported_lib = import_module(lib)
            if imported_lib:
                # (removed a dead `s0, s1, s2 = [], [], []` — the names were
                # immediately rebound by the sample() calls below)
                s0 = sample(X, size=10, library=lib, seed=0)
                s1 = sample(X, size=10, library=lib, seed=0)
                s2 = sample(X, size=10, library=lib, seed=1)
                assert all(s0 == s1)
                assert all(s1 != s2)
        except NotImplementedError:
            continue
|
||||
|
||||
#
|
||||
# XXX: This fails for pymc. Previously the test appeared to pass but that is
|
||||
# just because the library argument was not passed so the test always used
|
||||
# scipy.
|
||||
#
|
||||
def test_issue_21057():
    """Degenerate (zero-covariance) Normals compare equal and sample consistently."""
    m = Normal("x", [0, 0], [[0, 0], [0, 0]])
    n = MultivariateNormal("x", [0, 0], [[0, 0], [0, 0]])
    p = Normal("x", [0, 0], [[0, 0], [0, 1]])
    assert m == n
    libraries = ('scipy', 'numpy')  # , 'pymc') # <-- pymc fails
    for library in libraries:
        try:
            imported_lib = import_module(library)
            if imported_lib:
                s1 = sample(m, size=8, library=library)
                s2 = sample(n, size=8, library=library)
                s3 = sample(p, size=8, library=library)
                assert tuple(s1.flatten()) == tuple(s2.flatten())
                for s in s3:
                    assert tuple(s.flatten()) in p.pspace.distribution.set
        except NotImplementedError:
            continue
|
||||
|
||||
|
||||
#
|
||||
# When this passes the pymc part can be uncommented in test_issue_21057 above
|
||||
# and this can be deleted.
|
||||
#
|
||||
@XFAIL
def test_issue_21057_pymc():
    """pymc variant of test_issue_21057; expected failure until pymc sampling works."""
    m = Normal("x", [0, 0], [[0, 0], [0, 0]])
    n = MultivariateNormal("x", [0, 0], [[0, 0], [0, 0]])
    p = Normal("x", [0, 0], [[0, 0], [0, 1]])
    assert m == n
    libraries = ('pymc',)
    for library in libraries:
        try:
            imported_lib = import_module(library)
            if imported_lib:
                s1 = sample(m, size=8, library=library)
                s2 = sample(n, size=8, library=library)
                s3 = sample(p, size=8, library=library)
                assert tuple(s1.flatten()) == tuple(s2.flatten())
                for s in s3:
                    assert tuple(s.flatten()) in p.pspace.distribution.set
        except NotImplementedError:
            continue
|
||||
@@ -0,0 +1,186 @@
|
||||
from sympy.concrete.products import Product
|
||||
from sympy.core.numbers import pi
|
||||
from sympy.core.singleton import S
|
||||
from sympy.core.symbol import (Dummy, symbols)
|
||||
from sympy.functions.elementary.exponential import exp
|
||||
from sympy.functions.elementary.miscellaneous import sqrt
|
||||
from sympy.functions.special.gamma_functions import gamma
|
||||
from sympy.matrices import Determinant, Matrix, Trace, MatrixSymbol, MatrixSet
|
||||
from sympy.stats import density, sample
|
||||
from sympy.stats.matrix_distributions import (MatrixGammaDistribution,
|
||||
MatrixGamma, MatrixPSpace, Wishart, MatrixNormal, MatrixStudentT)
|
||||
from sympy.testing.pytest import raises, skip
|
||||
from sympy.external import import_module
|
||||
|
||||
|
||||
def test_MatrixPSpace():
    """MatrixPSpace stores its distribution and rejects non-integer dimensions."""
    M = MatrixGammaDistribution(1, 2, [[2, 1], [1, 2]])
    MP = MatrixPSpace('M', M, 2, 2)
    assert MP.distribution == M
    raises(ValueError, lambda: MatrixPSpace('M', M, 1.2, 2))
|
||||
|
||||
def test_MatrixGamma():
    """MatrixGamma: support set, densities (numeric and symbolic), validation."""
    M = MatrixGamma('M', 1, 2, [[1, 0], [0, 1]])
    assert M.pspace.distribution.set == MatrixSet(2, 2, S.Reals)
    assert isinstance(density(M), MatrixGammaDistribution)
    X = MatrixSymbol('X', 2, 2)
    num = exp(Trace(Matrix([[-S(1)/2, 0], [0, -S(1)/2]])*X))
    assert density(M)(X).doit() == num/(4*pi*sqrt(Determinant(X)))
    assert density(M)([[2, 1], [1, 2]]).doit() == sqrt(3)*exp(-2)/(12*pi)
    X = MatrixSymbol('X', 1, 2)
    Y = MatrixSymbol('Y', 1, 2)
    assert density(M)([X, Y]).doit() == exp(-X[0, 0]/2 - Y[0, 1]/2)/(4*pi*sqrt(
                                                    X[0, 0]*Y[0, 1] - X[0, 1]*Y[0, 0]))
    # symbolic
    a, b = symbols('a b', positive=True)
    d = symbols('d', positive=True, integer=True)
    Y = MatrixSymbol('Y', d, d)
    Z = MatrixSymbol('Z', 2, 2)
    SM = MatrixSymbol('SM', d, d)
    M2 = MatrixGamma('M2', a, b, SM)
    M3 = MatrixGamma('M3', 2, 3, [[2, 1], [1, 2]])
    k = Dummy('k')
    exprd = pi**(-d*(d - 1)/4)*b**(-a*d)*exp(Trace((-1/b)*SM**(-1)*Y)
        )*Determinant(SM)**(-a)*Determinant(Y)**(a - d/2 - S(1)/2)/Product(
        gamma(-k/2 + a + S(1)/2), (k, 1, d))
    assert density(M2)(Y).dummy_eq(exprd)
    raises(NotImplementedError, lambda: density(M3 + M)(Z))
    raises(ValueError, lambda: density(M)(1))
    raises(ValueError, lambda: MatrixGamma('M', -1, 2, [[1, 0], [0, 1]]))
    raises(ValueError, lambda: MatrixGamma('M', -1, -2, [[1, 0], [0, 1]]))
    raises(ValueError, lambda: MatrixGamma('M', -1, 2, [[1, 0], [2, 1]]))
    raises(ValueError, lambda: MatrixGamma('M', -1, 2, [[1, 0], [0]]))
|
||||
|
||||
def test_Wishart():
    """Wishart distribution: support set, concrete 2x2 densities,
    symbolic d x d density, and argument validation."""
    W = Wishart('W', 5, [[1, 0], [0, 1]])
    assert W.pspace.distribution.set == MatrixSet(2, 2, S.Reals)
    X = MatrixSymbol('X', 2, 2)
    term1 = exp(Trace(Matrix([[-S(1)/2, 0], [0, -S(1)/2]])*X))
    assert density(W)(X).doit() == term1 * Determinant(X)/(24*pi)
    assert density(W)([[2, 1], [1, 2]]).doit() == exp(-2)/(8*pi)
    # symbolic degrees of freedom n and d x d scale matrix SM
    n = symbols('n', positive=True)
    d = symbols('d', positive=True, integer=True)
    Y = MatrixSymbol('Y', d, d)
    SM = MatrixSymbol('SM', d, d)
    W = Wishart('W', n, SM)
    k = Dummy('k')
    exprd = 2**(-d*n/2)*pi**(-d*(d - 1)/4)*exp(Trace(-(S(1)/2)*SM**(-1)*Y)
        )*Determinant(SM)**(-n/2)*Determinant(Y)**(
        -d/2 + n/2 - S(1)/2)/Product(gamma(-k/2 + n/2 + S(1)/2), (k, 1, d))
    assert density(W)(Y).dummy_eq(exprd)
    raises(ValueError, lambda: density(W)(1))
    raises(ValueError, lambda: Wishart('W', -1, [[1, 0], [0, 1]]))
    raises(ValueError, lambda: Wishart('W', -1, [[1, 0], [2, 1]]))
    raises(ValueError, lambda: Wishart('W', 2, [[1, 0], [0]]))
|
||||
|
||||
def test_MatrixNormal():
    """Matrix-normal distribution: concrete 1x2 densities, symbolic n x d
    density, and validation of location/scale shape compatibility."""
    M = MatrixNormal('M', [[5, 6]], [4], [[2, 1], [1, 2]])
    assert M.pspace.distribution.set == MatrixSet(1, 2, S.Reals)
    X = MatrixSymbol('X', 1, 2)
    term1 = exp(-Trace(Matrix([[ S(2)/3, -S(1)/3], [-S(1)/3, S(2)/3]])*(
        Matrix([[-5], [-6]]) + X.T)*Matrix([[S(1)/4]])*(Matrix([[-5, -6]]) + X))/2)
    assert density(M)(X).doit() == (sqrt(3)) * term1/(24*pi)
    assert density(M)([[7, 8]]).doit() == sqrt(3)*exp(-S(1)/3)/(24*pi)
    # symbolic n x d case with symbolic scale matrices SM1 (rows), SM2 (columns)
    d, n = symbols('d n', positive=True, integer=True)
    SM2 = MatrixSymbol('SM2', d, d)
    SM1 = MatrixSymbol('SM1', n, n)
    LM = MatrixSymbol('LM', n, d)
    Y = MatrixSymbol('Y', n, d)
    M = MatrixNormal('M', LM, SM1, SM2)
    exprd = (2*pi)**(-d*n/2)*exp(-Trace(SM2**(-1)*(-LM.T + Y.T)*SM1**(-1)*(-LM + Y)
        )/2)*Determinant(SM1)**(-d/2)*Determinant(SM2)**(-n/2)
    assert density(M)(Y).doit() == exprd
    raises(ValueError, lambda: density(M)(1))
    raises(ValueError, lambda: MatrixNormal('M', [1, 2], [[1, 0], [0, 1]], [[1, 0], [2, 1]]))
    raises(ValueError, lambda: MatrixNormal('M', [1, 2], [[1, 0], [2, 1]], [[1, 0], [0, 1]]))
    raises(ValueError, lambda: MatrixNormal('M', [1, 2], [[1, 0], [0, 1]], [[1, 0], [0, 1]]))
    raises(ValueError, lambda: MatrixNormal('M', [1, 2], [[1, 0], [2]], [[1, 0], [0, 1]]))
    raises(ValueError, lambda: MatrixNormal('M', [1, 2], [[1, 0], [2, 1]], [[1, 0], [0]]))
    raises(ValueError, lambda: MatrixNormal('M', [[1, 2]], [[1, 0], [0, 1]], [[1, 0]]))
    raises(ValueError, lambda: MatrixNormal('M', [[1, 2]], [1], [[1, 0]]))
|
||||
|
||||
def test_MatrixStudentT():
    """Matrix Student-t distribution: concrete 1x2 density, symbolic density
    with symbolic dof v, and argument validation."""
    M = MatrixStudentT('M', 2, [[5, 6]], [[2, 1], [1, 2]], [4])
    assert M.pspace.distribution.set == MatrixSet(1, 2, S.Reals)
    X = MatrixSymbol('X', 1, 2)
    D = pi ** (-1.0) * Determinant(Matrix([[4]])) ** (-1.0) * Determinant(Matrix([[2, 1], [1, 2]])) \
        ** (-0.5) / Determinant(Matrix([[S(1) / 4]]) * (Matrix([[-5, -6]]) + X)
                                * Matrix([[S(2) / 3, -S(1) / 3], [-S(1) / 3, S(2) / 3]]) * (
                                    Matrix([[-5], [-6]]) + X.T) + Matrix([[1]])) ** 2
    assert density(M)(X) == D

    # symbolic degrees of freedom with symbolic scale matrices
    v = symbols('v', positive=True)
    n, p = 1, 2
    Omega = MatrixSymbol('Omega', p, p)
    Sigma = MatrixSymbol('Sigma', n, n)
    Location = MatrixSymbol('Location', n, p)
    Y = MatrixSymbol('Y', n, p)
    M = MatrixStudentT('M', v, Location, Omega, Sigma)

    exprd = gamma(v/2 + 1)*Determinant(Matrix([[1]]) + Sigma**(-1)*(-Location + Y)*Omega**(-1)*(-Location.T + Y.T))**(-v/2 - 1) / \
        (pi*gamma(v/2)*sqrt(Determinant(Omega))*Determinant(Sigma))

    assert density(M)(Y) == exprd
    raises(ValueError, lambda: density(M)(1))
    raises(ValueError, lambda: MatrixStudentT('M', 1, [1, 2], [[1, 0], [0, 1]], [[1, 0], [2, 1]]))
    raises(ValueError, lambda: MatrixStudentT('M', 1, [1, 2], [[1, 0], [2, 1]], [[1, 0], [0, 1]]))
    raises(ValueError, lambda: MatrixStudentT('M', 1, [1, 2], [[1, 0], [0, 1]], [[1, 0], [0, 1]]))
    raises(ValueError, lambda: MatrixStudentT('M', 1, [1, 2], [[1, 0], [2]], [[1, 0], [0, 1]]))
    raises(ValueError, lambda: MatrixStudentT('M', 1, [1, 2], [[1, 0], [2, 1]], [[1], [2]]))
    raises(ValueError, lambda: MatrixStudentT('M', 1, [[1, 2]], [[1, 0], [0, 1]], [[1, 0]]))
    raises(ValueError, lambda: MatrixStudentT('M', 1, [[1, 2]], [1], [[1, 0]]))
    raises(ValueError, lambda: MatrixStudentT('M', -1, [1, 2], [[1, 0], [0, 1]], [4]))
|
||||
|
||||
def test_sample_scipy():
    """Samples drawn via scipy lie in the distribution's support set;
    MatrixGamma has no scipy sampler and raises NotImplementedError."""
    distribs_scipy = [
        MatrixNormal('M', [[5, 6]], [4], [[2, 1], [1, 2]]),
        Wishart('W', 5, [[1, 0], [0, 1]])
    ]

    size = 5
    scipy = import_module('scipy')
    if not scipy:
        skip('Scipy not installed. Abort tests for _sample_scipy.')
    else:
        for X in distribs_scipy:
            samps = sample(X, size=size)
            for sam in samps:
                assert Matrix(sam) in X.pspace.distribution.set
        M = MatrixGamma('M', 1, 2, [[1, 0], [0, 1]])
        raises(NotImplementedError, lambda: sample(M, size=3))
|
||||
|
||||
def test_sample_pymc():
    """Samples drawn via pymc lie in the distribution's support set;
    MatrixGamma has no pymc sampler and raises NotImplementedError."""
    distribs_pymc = [
        MatrixNormal('M', [[5, 6], [3, 4]], [[1, 0], [0, 1]], [[2, 1], [1, 2]]),
        Wishart('W', 7, [[2, 1], [1, 2]])
    ]
    size = 3
    pymc = import_module('pymc')
    if not pymc:
        skip('PyMC is not installed. Abort tests for _sample_pymc.')
    else:
        for X in distribs_pymc:
            samps = sample(X, size=size, library='pymc')
            for sam in samps:
                assert Matrix(sam) in X.pspace.distribution.set
        M = MatrixGamma('M', 1, 2, [[1, 0], [0, 1]])
        raises(NotImplementedError, lambda: sample(M, size=3))
|
||||
|
||||
def test_sample_seed():
    """Seeded sampling is reproducible for every available backend:
    equal seeds give identical draws, different seeds give different draws.

    Backends that do not support this distribution raise
    NotImplementedError and are skipped.
    """
    X = MatrixNormal('M', [[5, 6], [3, 4]], [[1, 0], [0, 1]], [[2, 1], [1, 2]])

    libraries = ['scipy', 'numpy', 'pymc']
    for lib in libraries:
        try:
            imported_lib = import_module(lib)
            if imported_lib:
                # NOTE: the original pre-initialized s0/s1/s2 to empty lists,
                # but those values were dead stores - sample() overwrites them.
                s0 = sample(X, size=10, library=lib, seed=0)
                s1 = sample(X, size=10, library=lib, seed=0)
                s2 = sample(X, size=10, library=lib, seed=1)
                for i in range(10):
                    assert (s0[i] == s1[i]).all()
                    assert (s1[i] != s2[i]).all()

        except NotImplementedError:
            continue
|
||||
@@ -0,0 +1,82 @@
|
||||
from sympy.concrete.summations import Sum
|
||||
from sympy.core.add import Add
|
||||
from sympy.core.mul import Mul
|
||||
from sympy.core.numbers import (Integer, oo, pi)
|
||||
from sympy.core.power import Pow
|
||||
from sympy.core.relational import (Eq, Ne)
|
||||
from sympy.core.symbol import (Dummy, Symbol, symbols)
|
||||
from sympy.functions.combinatorial.factorials import factorial
|
||||
from sympy.functions.elementary.exponential import exp
|
||||
from sympy.functions.elementary.miscellaneous import sqrt
|
||||
from sympy.functions.elementary.piecewise import Piecewise
|
||||
from sympy.functions.special.delta_functions import DiracDelta
|
||||
from sympy.functions.special.gamma_functions import gamma
|
||||
from sympy.integrals.integrals import Integral
|
||||
from sympy.simplify.simplify import simplify
|
||||
from sympy.tensor.indexed import (Indexed, IndexedBase)
|
||||
from sympy.functions.elementary.piecewise import ExprCondPair
|
||||
from sympy.stats import (Poisson, Beta, Exponential, P,
|
||||
Multinomial, MultivariateBeta)
|
||||
from sympy.stats.crv_types import Normal
|
||||
from sympy.stats.drv_types import PoissonDistribution
|
||||
from sympy.stats.compound_rv import CompoundPSpace, CompoundDistribution
|
||||
from sympy.stats.joint_rv import MarginalDistribution
|
||||
from sympy.stats.rv import pspace, density
|
||||
from sympy.testing.pytest import ignore_warnings
|
||||
|
||||
def test_density():
    """Compound random variables: a Poisson with Beta-distributed rate forms
    a CompoundPSpace; a Normal with Normal mean marginalizes correctly."""
    x = Symbol('x')
    l = Symbol('l', positive=True)
    rate = Beta(l, 2, 3)
    X = Poisson(x, rate)
    assert isinstance(pspace(X), CompoundPSpace)
    # Conditioning on the rate symbol recovers the plain Poisson distribution.
    assert density(X, Eq(rate, rate.symbol)) == PoissonDistribution(l)
    N1 = Normal('N1', 0, 1)
    N2 = Normal('N2', N1, 2)
    assert density(N2)(0).doit() == sqrt(10)/(10*sqrt(pi))
    assert simplify(density(N2, Eq(N1, 1))(x)) == \
        sqrt(2)*exp(-(x - 1)**2/8)/(4*sqrt(pi))
    # Marginal density of N2 is Normal(0, sqrt(5)).
    assert simplify(density(N2)(x)) == sqrt(10)*exp(-x**2/10)/(10*sqrt(pi))
|
||||
|
||||
def test_MarginalDistribution():
    """Marginal of a MultivariateBeta over one Multinomial component equals
    an explicitly constructed expression tree."""
    a1, p1, p2 = symbols('a1 p1 p2', positive=True)
    C = Multinomial('C', 2, p1, p2)
    B = MultivariateBeta('B', a1, C[0])
    MGR = MarginalDistribution(B, (C[0],))
    # Expected expression built from raw SymPy nodes (Mul/Pow/Piecewise/...)
    # so the comparison is structural, not just symbolic equality.
    mgrc = Mul(Symbol('B'), Piecewise(ExprCondPair(Mul(Integer(2),
        Pow(Symbol('p1', positive=True), Indexed(IndexedBase(Symbol('C')),
        Integer(0))), Pow(Symbol('p2', positive=True),
        Indexed(IndexedBase(Symbol('C')), Integer(1))),
        Pow(factorial(Indexed(IndexedBase(Symbol('C')), Integer(0))), Integer(-1)),
        Pow(factorial(Indexed(IndexedBase(Symbol('C')), Integer(1))), Integer(-1))),
        Eq(Add(Indexed(IndexedBase(Symbol('C')), Integer(0)),
        Indexed(IndexedBase(Symbol('C')), Integer(1))), Integer(2))),
        ExprCondPair(Integer(0), True)), Pow(gamma(Symbol('a1', positive=True)),
        Integer(-1)), gamma(Add(Symbol('a1', positive=True),
        Indexed(IndexedBase(Symbol('C')), Integer(0)))),
        Pow(gamma(Indexed(IndexedBase(Symbol('C')), Integer(0))), Integer(-1)),
        Pow(Indexed(IndexedBase(Symbol('B')), Integer(0)),
        Add(Symbol('a1', positive=True), Integer(-1))),
        Pow(Indexed(IndexedBase(Symbol('B')), Integer(1)),
        Add(Indexed(IndexedBase(Symbol('C')), Integer(0)), Integer(-1))))
    assert MGR(C) == mgrc
|
||||
|
||||
def test_compound_distribution():
    """Poisson with Poisson-distributed rate builds a CompoundDistribution
    whose pdf evaluates correctly."""
    Y = Poisson('Y', 1)
    Z = Poisson('Z', Y)
    assert isinstance(pspace(Z), CompoundPSpace)
    assert isinstance(pspace(Z).distribution, CompoundDistribution)
    assert Z.pspace.distribution.pdf(1).doit() == exp(-2)*exp(exp(-1))
|
||||
|
||||
def test_mix_expression():
    """Probabilities of expressions mixing discrete (Poisson) and continuous
    (Exponential) random variables; unevaluated forms rewrite to integrals."""
    Y, E = Poisson('Y', 1), Exponential('E', 1)
    k = Dummy('k')
    expr1 = Integral(Sum(exp(-1)*Integral(exp(-k)*DiracDelta(k - 2), (k, 0, oo)
        )/factorial(k), (k, 0, oo)), (k, -oo, 0))
    expr2 = Integral(Sum(exp(-1)*Integral(exp(-k)*DiracDelta(k - 2), (k, 0, oo)
        )/factorial(k), (k, 0, oo)), (k, 0, oo))
    # A discrete + continuous sum hits any single value with probability 0.
    assert P(Eq(Y + E, 1)) == 0
    assert P(Ne(Y + E, 2)) == 1
    with ignore_warnings(UserWarning): ### TODO: Restore tests once warnings are removed
        assert P(E + Y < 2, evaluate=False).rewrite(Integral).dummy_eq(expr1)
        assert P(E + Y > 2, evaluate=False).rewrite(Integral).dummy_eq(expr2)
|
||||
@@ -0,0 +1,135 @@
|
||||
from sympy.concrete.products import Product
|
||||
from sympy.core.function import Lambda
|
||||
from sympy.core.numbers import (I, Rational, pi)
|
||||
from sympy.core.singleton import S
|
||||
from sympy.core.symbol import Dummy
|
||||
from sympy.functions.elementary.complexes import Abs
|
||||
from sympy.functions.elementary.exponential import exp
|
||||
from sympy.functions.elementary.miscellaneous import sqrt
|
||||
from sympy.integrals.integrals import Integral
|
||||
from sympy.matrices.dense import Matrix
|
||||
from sympy.matrices.expressions.matexpr import MatrixSymbol
|
||||
from sympy.matrices.expressions.trace import Trace
|
||||
from sympy.tensor.indexed import IndexedBase
|
||||
from sympy.stats import (GaussianUnitaryEnsemble as GUE, density,
|
||||
GaussianOrthogonalEnsemble as GOE,
|
||||
GaussianSymplecticEnsemble as GSE,
|
||||
joint_eigen_distribution,
|
||||
CircularUnitaryEnsemble as CUE,
|
||||
CircularOrthogonalEnsemble as COE,
|
||||
CircularSymplecticEnsemble as CSE,
|
||||
JointEigenDistribution,
|
||||
level_spacing_distribution,
|
||||
Normal, Beta)
|
||||
from sympy.stats.joint_rv_types import JointDistributionHandmade
|
||||
from sympy.stats.rv import RandomMatrixSymbol
|
||||
from sympy.stats.random_matrix_models import GaussianEnsemble, RandomMatrixPSpace
|
||||
from sympy.testing.pytest import raises
|
||||
|
||||
def test_GaussianEnsemble():
    """Generic Gaussian ensemble: density equals the pspace model;
    the ensemble dimension must be an integer."""
    G = GaussianEnsemble('G', 3)
    assert density(G) == G.pspace.model
    raises(ValueError, lambda: GaussianEnsemble('G', 3.5))
|
||||
|
||||
def test_GaussianUnitaryEnsemble():
    """GUE(3): matrix density, joint eigenvalue distribution, and
    Wigner-surmise level-spacing distribution (beta = 2)."""
    H = RandomMatrixSymbol('H', 3, 3)
    G = GUE('U', 3)
    assert density(G)(H) == sqrt(2)*exp(-3*Trace(H**2)/2)/(4*pi**Rational(9, 2))
    i, j = (Dummy('i', integer=True, positive=True),
            Dummy('j', integer=True, positive=True))
    l = IndexedBase('l')
    # Vandermonde factor enters squared for the unitary ensemble.
    assert joint_eigen_distribution(G).dummy_eq(
        Lambda((l[1], l[2], l[3]),
        27*sqrt(6)*exp(-3*(l[1]**2)/2 - 3*(l[2]**2)/2 - 3*(l[3]**2)/2)*
        Product(Abs(l[i] - l[j])**2, (j, i + 1, 3), (i, 1, 2))/(16*pi**Rational(3, 2))))
    s = Dummy('s')
    assert level_spacing_distribution(G).dummy_eq(Lambda(s, 32*s**2*exp(-4*s**2/pi)/pi**2))
|
||||
|
||||
|
||||
def test_GaussianOrthogonalEnsemble():
    """GOE(3): matrix density (normalization left as an Integral), joint
    eigenvalue distribution, and level-spacing distribution (beta = 1)."""
    H = RandomMatrixSymbol('H', 3, 3)
    _H = MatrixSymbol('_H', 3, 3)
    G = GOE('O', 3)
    assert density(G)(H) == exp(-3*Trace(H**2)/4)/Integral(exp(-3*Trace(_H**2)/4), _H)
    i, j = (Dummy('i', integer=True, positive=True),
            Dummy('j', integer=True, positive=True))
    l = IndexedBase('l')
    # Vandermonde factor enters to the first power for the orthogonal ensemble.
    assert joint_eigen_distribution(G).dummy_eq(
        Lambda((l[1], l[2], l[3]),
        9*sqrt(2)*exp(-3*l[1]**2/2 - 3*l[2]**2/2 - 3*l[3]**2/2)*
        Product(Abs(l[i] - l[j]), (j, i + 1, 3), (i, 1, 2))/(32*pi)))
    s = Dummy('s')
    assert level_spacing_distribution(G).dummy_eq(Lambda(s, s*pi*exp(-s**2*pi/4)/2))
|
||||
|
||||
def test_GaussianSymplecticEnsemble():
    """GSE(3): matrix density (normalization left as an Integral), joint
    eigenvalue distribution, and level-spacing distribution (beta = 4)."""
    H = RandomMatrixSymbol('H', 3, 3)
    _H = MatrixSymbol('_H', 3, 3)
    G = GSE('O', 3)
    assert density(G)(H) == exp(-3*Trace(H**2))/Integral(exp(-3*Trace(_H**2)), _H)
    i, j = (Dummy('i', integer=True, positive=True),
            Dummy('j', integer=True, positive=True))
    l = IndexedBase('l')
    # Vandermonde factor enters to the fourth power for the symplectic ensemble.
    assert joint_eigen_distribution(G).dummy_eq(
        Lambda((l[1], l[2], l[3]),
        162*sqrt(3)*exp(-3*l[1]**2/2 - 3*l[2]**2/2 - 3*l[3]**2/2)*
        Product(Abs(l[i] - l[j])**4, (j, i + 1, 3), (i, 1, 2))/(5*pi**Rational(3, 2))))
    s = Dummy('s')
    assert level_spacing_distribution(G).dummy_eq(Lambda(s, S(262144)*s**4*exp(-64*s**2/(9*pi))/(729*pi**3)))
|
||||
|
||||
def test_CircularUnitaryEnsemble():
    """CUE(3): joint eigenvalue-phase distribution on the unit circle
    (Vandermonde factor squared, beta = 2)."""
    CU = CUE('U', 3)
    j, k = (Dummy('j', integer=True, positive=True),
            Dummy('k', integer=True, positive=True))
    t = IndexedBase('t')
    assert joint_eigen_distribution(CU).dummy_eq(
        Lambda((t[1], t[2], t[3]),
        Product(Abs(exp(I*t[j]) - exp(I*t[k]))**2,
        (j, k + 1, 3), (k, 1, 2))/(48*pi**3))
    )
|
||||
|
||||
def test_CircularOrthogonalEnsemble():
    """COE(3): joint eigenvalue-phase distribution on the unit circle
    (Vandermonde factor to the first power, beta = 1)."""
    CO = COE('U', 3)
    j, k = (Dummy('j', integer=True, positive=True),
            Dummy('k', integer=True, positive=True))
    t = IndexedBase('t')
    assert joint_eigen_distribution(CO).dummy_eq(
        Lambda((t[1], t[2], t[3]),
        Product(Abs(exp(I*t[j]) - exp(I*t[k])),
        (j, k + 1, 3), (k, 1, 2))/(48*pi**2))
    )
|
||||
|
||||
def test_CircularSymplecticEnsemble():
    """CSE(3): joint eigenvalue-phase distribution on the unit circle
    (Vandermonde factor to the fourth power, beta = 4)."""
    CS = CSE('U', 3)
    j, k = (Dummy('j', integer=True, positive=True),
            Dummy('k', integer=True, positive=True))
    t = IndexedBase('t')
    assert joint_eigen_distribution(CS).dummy_eq(
        Lambda((t[1], t[2], t[3]),
        Product(Abs(exp(I*t[j]) - exp(I*t[k]))**4,
        (j, k + 1, 3), (k, 1, 2))/(720*pi**3))
    )
|
||||
|
||||
def test_JointEigenDistribution():
    """Joint distribution of the eigenvalues of a random 2x2 matrix equals
    the closed-form quadratic eigenvalue expressions; a matrix with no
    random entries is rejected."""
    A = Matrix([[Normal('A00', 0, 1), Normal('A01', 1, 1)],
                [Beta('A10', 1, 1), Beta('A11', 1, 1)]])
    assert JointEigenDistribution(A) == \
        JointDistributionHandmade(-sqrt(A[0, 0]**2 - 2*A[0, 0]*A[1, 1] + 4*A[0, 1]*A[1, 0] + A[1, 1]**2)/2 +
        A[0, 0]/2 + A[1, 1]/2, sqrt(A[0, 0]**2 - 2*A[0, 0]*A[1, 1] + 4*A[0, 1]*A[1, 0] + A[1, 1]**2)/2 + A[0, 0]/2 + A[1, 1]/2)
    raises(ValueError, lambda: JointEigenDistribution(Matrix([[1, 0], [2, 1]])))
|
||||
|
||||
def test_issue_19841():
    """Regression: xreplace on random-matrix symbols must respect the
    attached pspace, not just the symbol name (issue 19841)."""
    G1 = GUE('U', 2)
    G2 = G1.xreplace({2: 2})
    assert G1.args == G2.args

    X = MatrixSymbol('X', 2, 2)
    G = GSE('U', 2)
    h_pspace = RandomMatrixPSpace('P', model=density(G))
    H = RandomMatrixSymbol('H', 2, 2, pspace=h_pspace)
    H2 = RandomMatrixSymbol('H', 2, 2, pspace=None)
    assert H.doit() == H

    # H and H2 share a name but differ in pspace, so substitution of one
    # must not touch the other.
    assert (2*H).xreplace({H: X}) == 2*X
    assert (2*H).xreplace({H2: X}) == 2*H
    assert (2*H2).xreplace({H: X}) == 2*H2
    assert (2*H2).xreplace({H2: X}) == 2*X
|
||||
441
venv/lib/python3.12/site-packages/sympy/stats/tests/test_rv.py
Normal file
441
venv/lib/python3.12/site-packages/sympy/stats/tests/test_rv.py
Normal file
@@ -0,0 +1,441 @@
|
||||
from sympy.concrete.summations import Sum
|
||||
from sympy.core.basic import Basic
|
||||
from sympy.core.containers import Tuple
|
||||
from sympy.core.function import Lambda
|
||||
from sympy.core.numbers import (Rational, nan, oo, pi)
|
||||
from sympy.core.relational import Eq
|
||||
from sympy.core.singleton import S
|
||||
from sympy.core.symbol import (Symbol, symbols)
|
||||
from sympy.functions.combinatorial.factorials import (FallingFactorial, binomial)
|
||||
from sympy.functions.elementary.exponential import (exp, log)
|
||||
from sympy.functions.elementary.trigonometric import (cos, sin)
|
||||
from sympy.functions.special.delta_functions import DiracDelta
|
||||
from sympy.integrals.integrals import integrate
|
||||
from sympy.logic.boolalg import (And, Or)
|
||||
from sympy.matrices.dense import Matrix
|
||||
from sympy.sets.sets import Interval
|
||||
from sympy.tensor.indexed import Indexed
|
||||
from sympy.stats import (Die, Normal, Exponential, FiniteRV, P, E, H, variance,
|
||||
density, given, independent, dependent, where, pspace, GaussianUnitaryEnsemble,
|
||||
random_symbols, sample, Geometric, factorial_moment, Binomial, Hypergeometric,
|
||||
DiscreteUniform, Poisson, characteristic_function, moment_generating_function,
|
||||
BernoulliProcess, Variance, Expectation, Probability, Covariance, covariance, cmoment,
|
||||
moment, median)
|
||||
from sympy.stats.rv import (IndependentProductPSpace, rs_swap, Density, NamedArgsMixin,
|
||||
RandomSymbol, sample_iter, PSpace, is_random, RandomIndexedSymbol, RandomMatrixSymbol)
|
||||
from sympy.testing.pytest import raises, skip, XFAIL, warns_deprecated_sympy
|
||||
from sympy.external import import_module
|
||||
from sympy.core.numbers import comp
|
||||
from sympy.stats.frv_types import BernoulliDistribution
|
||||
from sympy.core.symbol import Dummy
|
||||
from sympy.functions.elementary.piecewise import Piecewise
|
||||
|
||||
def test_where():
    """where() on finite and continuous RVs: resulting sets, boolean forms,
    and conditioning via given()."""
    X, Y = Die('X'), Die('Y')
    Z = Normal('Z', 0, 1)

    assert where(Z**2 <= 1).set == Interval(-1, 1)
    assert where(Z**2 <= 1).as_boolean() == Interval(-1, 1).as_relational(Z.symbol)
    assert where(And(X > Y, Y > 4)).as_boolean() == And(
        Eq(X.symbol, 6), Eq(Y.symbol, 5))

    assert len(where(X < 3).set) == 2
    assert 1 in where(X < 3).set

    X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
    assert where(And(X**2 <= 1, X >= 0)).set == Interval(0, 1)
    XX = given(X, And(X**2 <= 1, X >= 0))
    assert XX.pspace.domain.set == Interval(0, 1)
    assert XX.pspace.domain.as_boolean() == \
        And(0 <= X.symbol, X.symbol**2 <= 1, -oo < X.symbol, X.symbol < oo)

    # Conditioning on a non-boolean expression is a TypeError.
    with raises(TypeError):
        XX = given(X, X + 3)
|
||||
|
||||
|
||||
def test_random_symbols():
    """random_symbols extracts only RandomSymbols, not plain Symbols."""
    X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)

    assert set(random_symbols(2*X + 1)) == {X}
    assert set(random_symbols(2*X + Y)) == {X, Y}
    # X.symbol is a plain Symbol, so it does not count as random.
    assert set(random_symbols(2*X + Y.symbol)) == {X}
    assert set(random_symbols(2)) == set()
|
||||
|
||||
|
||||
def test_characteristic_function():
    """Characteristic functions of Normal, DiscreteUniform and Poisson RVs."""
    # Imports I from sympy
    from sympy.core.numbers import I
    X = Normal('X',0,1)
    Y = DiscreteUniform('Y', [1,2,7])
    Z = Poisson('Z', 2)
    t = symbols('_t')
    P = Lambda(t, exp(-t**2/2))
    Q = Lambda(t, exp(7*t*I)/3 + exp(2*t*I)/3 + exp(t*I)/3)
    R = Lambda(t, exp(2 * exp(t*I) - 2))

    # dummy_eq compares modulo the name of the bound variable.
    assert characteristic_function(X).dummy_eq(P)
    assert characteristic_function(Y).dummy_eq(Q)
    assert characteristic_function(Z).dummy_eq(R)
|
||||
|
||||
|
||||
def test_moment_generating_function():
    """Moment generating functions of Normal, DiscreteUniform and Poisson RVs."""
    X = Normal('X',0,1)
    Y = DiscreteUniform('Y', [1,2,7])
    Z = Poisson('Z', 2)
    t = symbols('_t')
    P = Lambda(t, exp(t**2/2))
    Q = Lambda(t, (exp(7*t)/3 + exp(2*t)/3 + exp(t)/3))
    R = Lambda(t, exp(2 * exp(t) - 2))

    # dummy_eq compares modulo the name of the bound variable.
    assert moment_generating_function(X).dummy_eq(P)
    assert moment_generating_function(Y).dummy_eq(Q)
    assert moment_generating_function(Z).dummy_eq(R)
|
||||
|
||||
def test_sample_iter():
    """sample_iter returns a true iterator (supports the full iterator
    protocol) for continuous, finite and discrete random expressions."""
    X = Normal('X',0,1)
    Y = DiscreteUniform('Y', [1, 2, 7])
    Z = Poisson('Z', 2)

    scipy = import_module('scipy')
    if not scipy:
        skip('Scipy is not installed. Abort tests')
    expr = X**2 + 3
    iterator = sample_iter(expr)

    expr2 = Y**2 + 5*Y + 4
    iterator2 = sample_iter(expr2)

    expr3 = Z**3 + 4
    iterator3 = sample_iter(expr3)

    def is_iterator(obj):
        # An iterator is its own iterable and exposes next/__next__;
        # return the protocol check directly rather than via if/else.
        return (
            hasattr(obj, '__iter__') and
            (hasattr(obj, 'next') or
            hasattr(obj, '__next__')) and
            callable(obj.__iter__) and
            obj.__iter__() is obj
        )
    assert is_iterator(iterator)
    assert is_iterator(iterator2)
    assert is_iterator(iterator3)
|
||||
|
||||
def test_pspace():
    """pspace() of expressions: errors for non-random input, single pspace
    for one RV, product pspace for two independent RVs."""
    X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
    x = Symbol('x')

    raises(ValueError, lambda: pspace(5 + 3))
    raises(ValueError, lambda: pspace(x < 1))
    assert pspace(X) == X.pspace
    assert pspace(2*X + 1) == X.pspace
    assert pspace(2*X + Y) == IndependentProductPSpace(Y.pspace, X.pspace)
|
||||
|
||||
def test_rs_swap():
    """rs_swap builds a substitution map matching RVs by symbol name."""
    X = Normal('x', 0, 1)
    Y = Exponential('y', 1)

    XX = Normal('x', 0, 2)
    YY = Normal('y', 0, 3)

    # X is replaced by XX ('x' -> 'x') and Y by YY ('y' -> 'y').
    expr = 2*X + Y
    assert expr.subs(rs_swap((X, Y), (YY, XX))) == 2*XX + YY
|
||||
|
||||
|
||||
def test_RandomSymbol():
    """RandomSymbols with equal names but different pspaces are distinct;
    reserved words are valid names."""
    X = Normal('x', 0, 1)
    Y = Normal('x', 0, 2)
    assert X.symbol == Y.symbol
    assert X != Y

    assert X.name == X.symbol.name

    X = Normal('lambda', 0, 1) # make sure we can use protected terms
    X = Normal('Lambda', 0, 1) # make sure we can use SymPy terms
|
||||
|
||||
|
||||
def test_RandomSymbol_diff():
    """Expressions in a RandomSymbol can be differentiated w.r.t. it."""
    X = Normal('x', 0, 1)
    assert (2*X).diff(X)
|
||||
|
||||
|
||||
def test_random_symbol_no_pspace():
    """A RandomSymbol constructed without a pspace defaults to PSpace()."""
    x = RandomSymbol(Symbol('x'))
    assert x.pspace == PSpace()
|
||||
|
||||
def test_overlap():
    """Two distinct RVs sharing the same symbol name cannot be combined."""
    X = Normal('x', 0, 1)
    Y = Normal('x', 0, 2)

    raises(ValueError, lambda: P(X > Y))
|
||||
|
||||
|
||||
def test_IndependentProductPSpace():
    """IndependentProductPSpace is symmetric in its component spaces."""
    X = Normal('X', 0, 1)
    Y = Normal('Y', 0, 1)
    px = X.pspace
    py = Y.pspace
    assert pspace(X + Y) == IndependentProductPSpace(px, py)
    assert pspace(X + Y) == IndependentProductPSpace(py, px)
|
||||
|
||||
|
||||
def test_E():
    """Expectation of a non-random constant is the constant itself."""
    assert E(5) == 5
|
||||
|
||||
|
||||
def test_H():
    """Entropy H of conditioned continuous, finite and discrete RVs."""
    X = Normal('X', 0, 1)
    D = Die('D', sides = 4)
    G = Geometric('G', 0.5)
    assert H(X, X > 0) == -log(2)/2 + S.Half + log(pi)/2
    assert H(D, D > 2) == log(2)
    # Geometric(1/2) has entropy 2*log(2) ~ 1.39 nats.
    assert comp(H(G).evalf().round(2), 1.39)
|
||||
|
||||
|
||||
def test_Sample():
    """Sampling-based machinery: sample(), sampling-backed P/E/variance and
    density via numsamples, output types, and the deprecated numsamples
    argument to sample()."""
    X = Die('X', 6)
    Y = Normal('Y', 0, 1)
    z = Symbol('z', integer=True)

    scipy = import_module('scipy')
    if not scipy:
        skip('Scipy is not installed. Abort tests')
    assert sample(X) in [1, 2, 3, 4, 5, 6]
    assert isinstance(sample(X + Y), float)

    assert P(X + Y > 0, Y < 0, numsamples=10).is_number
    assert E(X + Y, numsamples=10).is_number
    assert E(X**2 + Y, numsamples=10).is_number
    assert E((X + Y)**2, numsamples=10).is_number
    assert variance(X + Y, numsamples=10).is_number

    # Sampling cannot resolve a condition containing a free symbol.
    raises(TypeError, lambda: P(Y > z, numsamples=5))

    assert P(sin(Y) <= 1, numsamples=10) == 1.0
    assert P(sin(Y) <= 1, cos(Y) < 1, numsamples=10) == 1.0

    assert all(i in range(1, 7) for i in density(X, numsamples=10))
    assert all(i in range(4, 7) for i in density(X, X>3, numsamples=10))

    numpy = import_module('numpy')
    if not numpy:
        skip('Numpy is not installed. Abort tests')
    #Test Issue #21563: Output of sample must be a float or array
    assert isinstance(sample(X), (numpy.int32, numpy.int64))
    assert isinstance(sample(Y), numpy.float64)
    assert isinstance(sample(X, size=2), numpy.ndarray)

    with warns_deprecated_sympy():
        sample(X, numsamples=2)
|
||||
|
||||
@XFAIL
def test_samplingE():
    """Sampling-based expectation of an infinite Sum (known failure)."""
    scipy = import_module('scipy')
    if not scipy:
        skip('Scipy is not installed. Abort tests')
    Y = Normal('Y', 0, 1)
    z = Symbol('z', integer=True)
    assert E(Sum(1/z**Y, (z, 1, oo)), Y > 2, numsamples=3).is_number
|
||||
|
||||
|
||||
def test_given():
    """given() with a trivial or independent condition returns the RV unchanged."""
    X = Normal('X', 0, 1)
    Y = Normal('Y', 0, 1)
    A = given(X, True)
    B = given(X, Y > 2)

    assert X == A == B
|
||||
|
||||
|
||||
def test_factorial_moment():
    """Factorial moments of Poisson, Binomial and Hypergeometric RVs, both
    with numeric and symbolic order."""
    X = Poisson('X', 2)
    Y = Binomial('Y', 2, S.Half)
    Z = Hypergeometric('Z', 4, 2, 2)
    assert factorial_moment(X, 2) == 4
    assert factorial_moment(Y, 2) == S.Half
    assert factorial_moment(Z, 2) == Rational(1, 3)

    # symbolic order l, symbolic success probability y
    x, y, z, l = symbols('x y z l')
    Y = Binomial('Y', 2, y)
    Z = Hypergeometric('Z', 10, 2, 3)
    assert factorial_moment(Y, l) == y**2*FallingFactorial(
        2, l) + 2*y*(1 - y)*FallingFactorial(1, l) + (1 - y)**2*\
            FallingFactorial(0, l)
    assert factorial_moment(Z, l) == 7*FallingFactorial(0, l)/\
        15 + 7*FallingFactorial(1, l)/15 + FallingFactorial(2, l)/15
|
||||
|
||||
|
||||
def test_dependence():
    """independent()/dependent() on finite and continuous RVs, including a
    dependency created by conditioning."""
    X, Y = Die('X'), Die('Y')
    assert independent(X, 2*Y)
    assert not dependent(X, 2*Y)

    X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
    assert independent(X, Y)
    assert dependent(X, 2*X)

    # Create a dependency
    XX, YY = given(Tuple(X, Y), Eq(X + Y, 3))
    assert dependent(XX, YY)
|
||||
|
||||
def test_dependent_finite():
    """Dependence detection for finite RVs, including after conditioning."""
    X, Y = Die('X'), Die('Y')
    # Dependence testing requires symbolic conditions which currently break
    # finite random variables
    assert dependent(X, Y + X)

    XX, YY = given(Tuple(X, Y), X + Y > 5) # Create a dependency
    assert dependent(XX, YY)
|
||||
|
||||
|
||||
def test_normality():
    """Conditional density of X - Y given X + Y = z integrates to 1."""
    X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
    x = Symbol('x', real=True)
    z = Symbol('z', real=True)
    dens = density(X - Y, Eq(X + Y, z))

    assert integrate(dens(x), (x, -oo, oo)) == 1
|
||||
|
||||
|
||||
def test_Density():
    """The unevaluated Density object evaluates to density()."""
    X = Die('X', 6)
    d = Density(X)
    assert d.doit() == density(X)
|
||||
|
||||
def test_NamedArgsMixin():
    """NamedArgsMixin maps _argnames entries to positional args; unknown
    names (or a missing _argnames) raise AttributeError."""
    class Foo(Basic, NamedArgsMixin):
        _argnames = 'foo', 'bar'

    a = Foo(S(1), S(2))

    assert a.foo == 1
    assert a.bar == 2

    raises(AttributeError, lambda: a.baz)

    class Bar(Basic, NamedArgsMixin):
        pass

    raises(AttributeError, lambda: Bar(S(1), S(2)).foo)
|
||||
|
||||
def test_density_constant():
    """Density of a constant is a Dirac delta at that constant."""
    assert density(3)(2) == 0
    assert density(3)(3) == DiracDelta(0)
|
||||
|
||||
def test_cmoment_constant():
    """Central moments of constants (and free symbols) vanish; the zeroth
    central moment is 1."""
    assert variance(3) == 0
    assert cmoment(3, 3) == 0
    assert cmoment(3, 4) == 0
    x = Symbol('x')
    assert variance(x) == 0
    assert cmoment(x, 15) == 0
    assert cmoment(x, 0) == 1
|
||||
|
||||
def test_moment_constant():
    """Raw moments of a constant are its powers."""
    assert moment(3, 0) == 1
    assert moment(3, 1) == 3
    assert moment(3, 2) == 9
    x = Symbol('x')
    assert moment(x, 2) == x**2
|
||||
|
||||
def test_median_constant():
    """Median of a constant (or free symbol) is itself."""
    assert median(3) == 3
    x = Symbol('x')
    assert median(x) == x
|
||||
|
||||
def test_real():
    """A Normal random variable is assumed real."""
    x = Normal('x', 0, 1)
    assert x.is_real
|
||||
|
||||
|
||||
def test_issue_10052():
    """Probabilities involving infinite or measure-zero conditions
    (issue 10052); non-boolean arguments to P are rejected."""
    X = Exponential('X', 3)
    assert P(X < oo) == 1
    assert P(X > oo) == 0
    assert P(X < 2, X > oo) == 0
    assert P(X < oo, X > oo) == 0
    assert P(X < oo, X > 2) == 1
    assert P(X < 3, X == 2) == 0
    raises(ValueError, lambda: P(1))
    raises(ValueError, lambda: P(X < 1, 2))
|
||||
|
||||
def test_issue_11934():
    """FiniteRV constructed from a probability-mass dict (issue 11934)."""
    # Named 'pmf' rather than 'density' to avoid shadowing the
    # sympy.stats.density function imported at module level.
    pmf = {0: .5, 1: .5}
    X = FiniteRV('X', pmf)
    assert E(X) == 0.5
    assert P(X >= 2) == 0
|
||||
|
||||
def test_issue_8129():
    """Self-comparisons of a random variable (issue 8129)."""
    X = Exponential('X', 4)
    assert P(X >= X) == 1
    assert P(X > X) == 0
    assert P(X > X+1) == 0
|
||||
|
||||
def test_issue_12237():
    """Probabilities conditioned on a random variable (issue 12237)."""
    X = Normal('X', 0, 1)
    Y = Normal('Y', 0, 1)
    U = P(X > 0, X)
    V = P(Y < 0, X)
    W = P(X + Y > 0, X)
    assert W == P(X + Y > 0, X)
    # Conditioning on X itself yields a Bernoulli distribution.
    assert U == BernoulliDistribution(S.Half, S.Zero, S.One)
    # Y is independent of X, so conditioning on X is a no-op.
    assert V == S.Half
|
||||
|
||||
def test_is_random():
    """is_random detects randomness through arithmetic, relational,
    matrix and stochastic-process expressions."""
    X = Normal('X', 0, 1)
    Y = Normal('Y', 0, 1)
    a, b = symbols('a, b')
    G = GaussianUnitaryEnsemble('U', 2)
    B = BernoulliProcess('B', 0.9)
    assert not is_random(a)
    assert not is_random(a + b)
    assert not is_random(a * b)
    assert not is_random(Matrix([a**2, b**2]))
    assert is_random(X)
    assert is_random(X**2 + Y)
    assert is_random(Y + b**2)
    assert is_random(Y > 5)
    assert is_random(B[3] < 1)
    assert is_random(G)
    assert is_random(X * Y * B[1])
    assert is_random(Matrix([[X, B[2]], [G, Y]]))
    assert is_random(Eq(X, 4))
|
||||
|
||||
def test_issue_12283():
    """Random symbols without an attached pspace return unevaluated
    Expectation/Probability/Variance/Covariance objects (issue 12283)."""
    x = symbols('x')
    X = RandomSymbol(x)
    Y = RandomSymbol('Y')
    Z = RandomMatrixSymbol('Z', 2, 1)
    W = RandomMatrixSymbol('W', 2, 1)
    RI = RandomIndexedSymbol(Indexed('RI', 3))
    assert pspace(Z) == PSpace()
    assert pspace(RI) == PSpace()
    assert pspace(X) == PSpace()
    assert E(X) == Expectation(X)
    assert P(Y > 3) == Probability(Y > 3)
    assert variance(X) == Variance(X)
    assert variance(RI) == Variance(RI)
    assert covariance(X, Y) == Covariance(X, Y)
    assert covariance(W, Z) == Covariance(W, Z)
|
||||
|
||||
def test_issue_6810():
    """Pointwise probabilities: nonzero for a discrete RV, zero for a
    continuous RV; compound boolean conditions (issue 6810)."""
    X = Die('X', 6)
    Y = Normal('Y', 0, 1)
    assert P(Eq(X, 2)) == S(1)/6
    assert P(Eq(Y, 0)) == 0
    assert P(Or(X > 2, X < 3)) == 1
    assert P(And(X > 3, X > 2)) == S(1)/2
|
||||
|
||||
def test_issue_20286():
    """Entropy of a symbolic Binomial yields the expected Sum (issue 20286)."""
    n, p = symbols('n p')
    B = Binomial('B', n, p)
    k = Dummy('k', integer = True)
    eq = Sum(Piecewise((-p**k*(1 - p)**(-k + n)*log(p**k*(1 - p)**(-k + n)*binomial(n, k))*binomial(n, k), (k >= 0) & (k <= n)), (nan, True)), (k, 0, n))
    assert eq.dummy_eq(H(B))
|
||||
@@ -0,0 +1,763 @@
|
||||
from sympy.concrete.summations import Sum
|
||||
from sympy.core.containers import Tuple
|
||||
from sympy.core.function import Lambda
|
||||
from sympy.core.numbers import (Float, Rational, oo, pi)
|
||||
from sympy.core.relational import (Eq, Ge, Gt, Le, Lt, Ne)
|
||||
from sympy.core.singleton import S
|
||||
from sympy.core.symbol import (Symbol, symbols)
|
||||
from sympy.functions.combinatorial.factorials import factorial
|
||||
from sympy.functions.elementary.exponential import exp
|
||||
from sympy.functions.elementary.integers import ceiling
|
||||
from sympy.functions.elementary.miscellaneous import sqrt
|
||||
from sympy.functions.elementary.piecewise import Piecewise
|
||||
from sympy.functions.special.error_functions import erf
|
||||
from sympy.functions.special.gamma_functions import (gamma, lowergamma)
|
||||
from sympy.logic.boolalg import (And, Not)
|
||||
from sympy.matrices.dense import Matrix
|
||||
from sympy.matrices.expressions.matexpr import MatrixSymbol
|
||||
from sympy.matrices.immutable import ImmutableMatrix
|
||||
from sympy.sets.contains import Contains
|
||||
from sympy.sets.fancysets import Range
|
||||
from sympy.sets.sets import (FiniteSet, Interval)
|
||||
from sympy.stats import (DiscreteMarkovChain, P, TransitionMatrixOf, E,
|
||||
StochasticStateSpaceOf, variance, ContinuousMarkovChain,
|
||||
BernoulliProcess, PoissonProcess, WienerProcess,
|
||||
GammaProcess, sample_stochastic_process)
|
||||
from sympy.stats.joint_rv import JointDistribution
|
||||
from sympy.stats.joint_rv_types import JointDistributionHandmade
|
||||
from sympy.stats.rv import RandomIndexedSymbol
|
||||
from sympy.stats.symbolic_probability import Probability, Expectation
|
||||
from sympy.testing.pytest import (raises, skip, ignore_warnings,
|
||||
warns_deprecated_sympy)
|
||||
from sympy.external import import_module
|
||||
from sympy.stats.frv_types import BernoulliDistribution
|
||||
from sympy.stats.drv_types import PoissonDistribution
|
||||
from sympy.stats.crv_types import NormalDistribution, GammaDistribution
|
||||
from sympy.core.symbol import Str
|
||||
|
||||
|
||||
def test_DiscreteMarkovChain():
    """Exercise the DiscreteMarkovChain API end to end.

    Covers: name-only construction, custom/symbolic state spaces,
    numeric and symbolic transition matrices, probability and
    expectation queries, structural properties (communication classes,
    canonical form, decomposition, regularity, ergodicity, absorption),
    sampling errors, and symbolic time-index queries.
    """

    # pass only the name
    X = DiscreteMarkovChain("X")
    assert isinstance(X.state_space, Range)
    assert X.index_set == S.Naturals0
    assert isinstance(X.transition_probabilities, MatrixSymbol)
    t = symbols('t', positive=True, integer=True)
    assert isinstance(X[t], RandomIndexedSymbol)
    assert E(X[0]) == Expectation(X[0])
    raises(TypeError, lambda: DiscreteMarkovChain(1))
    raises(NotImplementedError, lambda: X(t))
    # structural queries need a concrete transition matrix
    raises(NotImplementedError, lambda: X.communication_classes())
    raises(NotImplementedError, lambda: X.canonical_form())
    raises(NotImplementedError, lambda: X.decompose())

    nz = Symbol('n', integer=True)
    TZ = MatrixSymbol('M', nz, nz)
    SZ = Range(nz)
    YZ = DiscreteMarkovChain('Y', SZ, TZ)
    # one-step transition probability is read directly off the matrix entry
    assert P(Eq(YZ[2], 1), Eq(YZ[1], 0)) == TZ[0, 1]

    raises(ValueError, lambda: sample_stochastic_process(t))
    raises(ValueError, lambda: next(sample_stochastic_process(X)))
    # pass name and state_space
    # any hashable object should be a valid state
    # states should be valid as a tuple/set/list/Tuple/Range
    sym, rainy, cloudy, sunny = symbols('a Rainy Cloudy Sunny', real=True)
    state_spaces = [(1, 2, 3), [Str('Hello'), sym, DiscreteMarkovChain("Y", (1,2,3))],
                    Tuple(S(1), exp(sym), Str('World'), sympify=False), Range(-1, 5, 2),
                    [rainy, cloudy, sunny]]
    chains = [DiscreteMarkovChain("Y", state_space) for state_space in state_spaces]

    for i, Y in enumerate(chains):
        assert isinstance(Y.transition_probabilities, MatrixSymbol)
        assert Y.state_space == state_spaces[i] or Y.state_space == FiniteSet(*state_spaces[i])
        assert Y.number_of_states == 3

        with ignore_warnings(UserWarning):  # TODO: Restore tests once warnings are removed
            assert P(Eq(Y[2], 1), Eq(Y[0], 2), evaluate=False) == Probability(Eq(Y[2], 1), Eq(Y[0], 2))
            assert E(Y[0]) == Expectation(Y[0])

        raises(ValueError, lambda: next(sample_stochastic_process(Y)))

    raises(TypeError, lambda: DiscreteMarkovChain("Y", {1: 1}))
    # symbolic Range end gives a symbolic state count
    Y = DiscreteMarkovChain("Y", Range(1, t, 2))
    assert Y.number_of_states == ceiling((t-1)/2)

    # pass name and transition_probabilities
    chains = [DiscreteMarkovChain("Y", trans_probs=Matrix([])),
              DiscreteMarkovChain("Y", trans_probs=Matrix([[0, 1], [1, 0]])),
              DiscreteMarkovChain("Y", trans_probs=Matrix([[pi, 1-pi], [sym, 1-sym]]))]
    for Z in chains:
        assert Z.number_of_states == Z.transition_probabilities.shape[0]
        assert isinstance(Z.transition_probabilities, ImmutableMatrix)

    # pass name, state_space and transition_probabilities
    T = Matrix([[0.5, 0.2, 0.3],[0.2, 0.5, 0.3],[0.2, 0.3, 0.5]])
    TS = MatrixSymbol('T', 3, 3)
    Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
    YS = DiscreteMarkovChain("Y", ['One', 'Two', 3], TS)
    assert Y.joint_distribution(1, Y[2], 3) == JointDistribution(Y[1], Y[2], Y[3])
    raises(ValueError, lambda: Y.joint_distribution(Y[1].symbol, Y[2].symbol))
    assert P(Eq(Y[3], 2), Eq(Y[1], 1)).round(2) == Float(0.36, 2)
    assert (P(Eq(YS[3], 2), Eq(YS[1], 1)) -
            (TS[0, 2]*TS[1, 0] + TS[1, 1]*TS[1, 2] + TS[1, 2]*TS[2, 2])).simplify() == 0
    # conditioning on the future reduces to the unconditional probability
    assert P(Eq(YS[1], 1), Eq(YS[2], 2)) == Probability(Eq(YS[1], 1))
    assert P(Eq(YS[3], 3), Eq(YS[1], 1)) == TS[0, 2]*TS[1, 0] + TS[1, 1]*TS[1, 2] + TS[1, 2]*TS[2, 2]
    TO = Matrix([[0.25, 0.75, 0],[0, 0.25, 0.75],[0.75, 0, 0.25]])
    assert P(Eq(Y[3], 2), Eq(Y[1], 1) & TransitionMatrixOf(Y, TO)).round(3) == Float(0.375, 3)
    with ignore_warnings(UserWarning): ### TODO: Restore tests once warnings are removed
        assert E(Y[3], evaluate=False) == Expectation(Y[3])
        assert E(Y[3], Eq(Y[2], 1)).round(2) == Float(1.1, 3)
    TSO = MatrixSymbol('T', 4, 4)
    # mismatched matrix size / wrong types must be rejected
    raises(ValueError, lambda: str(P(Eq(YS[3], 2), Eq(YS[1], 1) & TransitionMatrixOf(YS, TSO))))
    raises(TypeError, lambda: DiscreteMarkovChain("Z", [0, 1, 2], symbols('M')))
    raises(ValueError, lambda: DiscreteMarkovChain("Z", [0, 1, 2], MatrixSymbol('T', 3, 4)))
    raises(ValueError, lambda: E(Y[3], Eq(Y[2], 6)))
    raises(ValueError, lambda: E(Y[2], Eq(Y[3], 1)))

    # extended tests for probability queries
    TO1 = Matrix([[Rational(1, 4), Rational(3, 4), 0],[Rational(1, 3), Rational(1, 3), Rational(1, 3)],[0, Rational(1, 4), Rational(3, 4)]])
    assert P(And(Eq(Y[2], 1), Eq(Y[1], 1), Eq(Y[0], 0)),
             Eq(Probability(Eq(Y[0], 0)), Rational(1, 4)) & TransitionMatrixOf(Y, TO1)) == Rational(1, 16)
    assert P(And(Eq(Y[2], 1), Eq(Y[1], 1), Eq(Y[0], 0)), TransitionMatrixOf(Y, TO1)) == \
            Probability(Eq(Y[0], 0))/4
    assert P(Lt(X[1], 2) & Gt(X[1], 0), Eq(X[0], 2) &
             StochasticStateSpaceOf(X, [0, 1, 2]) & TransitionMatrixOf(X, TO1)) == Rational(1, 4)
    assert P(Lt(X[1], 2) & Gt(X[1], 0), Eq(X[0], 2) &
             StochasticStateSpaceOf(X, [S(0), '0', 1]) & TransitionMatrixOf(X, TO1)) == Rational(1, 4)
    assert P(Ne(X[1], 2) & Ne(X[1], 1), Eq(X[0], 2) &
             StochasticStateSpaceOf(X, [0, 1, 2]) & TransitionMatrixOf(X, TO1)) is S.Zero
    assert P(Ne(X[1], 2) & Ne(X[1], 1), Eq(X[0], 2) &
             StochasticStateSpaceOf(X, [S(0), '0', 1]) & TransitionMatrixOf(X, TO1)) is S.Zero
    assert P(And(Eq(Y[2], 1), Eq(Y[1], 1), Eq(Y[0], 0)), Eq(Y[1], 1)) == 0.1*Probability(Eq(Y[0], 0))

    # testing properties of Markov chain
    TO2 = Matrix([[S.One, 0, 0],[Rational(1, 3), Rational(1, 3), Rational(1, 3)],[0, Rational(1, 4), Rational(3, 4)]])
    TO3 = Matrix([[Rational(1, 4), Rational(3, 4), 0],[Rational(1, 3), Rational(1, 3), Rational(1, 3)], [0, Rational(1, 4), Rational(3, 4)]])
    Y2 = DiscreteMarkovChain('Y', trans_probs=TO2)
    Y3 = DiscreteMarkovChain('Y', trans_probs=TO3)
    assert Y3.fundamental_matrix() == ImmutableMatrix([[176, 81, -132], [36, 141, -52], [-44, -39, 208]])/125
    assert Y2.is_absorbing_chain() == True
    assert Y3.is_absorbing_chain() == False
    assert Y2.canonical_form() == ([0, 1, 2], TO2)
    assert Y3.canonical_form() == ([0, 1, 2], TO3)
    assert Y2.decompose() == ([0, 1, 2], TO2[0:1, 0:1], TO2[1:3, 0:1], TO2[1:3, 1:3])
    assert Y3.decompose() == ([0, 1, 2], TO3, Matrix(0, 3, []), Matrix(0, 0, []))
    TO4 = Matrix([[Rational(1, 5), Rational(2, 5), Rational(2, 5)], [Rational(1, 10), S.Half, Rational(2, 5)], [Rational(3, 5), Rational(3, 10), Rational(1, 10)]])
    Y4 = DiscreteMarkovChain('Y', trans_probs=TO4)
    w = ImmutableMatrix([[Rational(11, 39), Rational(16, 39), Rational(4, 13)]])
    assert Y4.limiting_distribution == w
    assert Y4.is_regular() == True
    assert Y4.is_ergodic() == True
    TS1 = MatrixSymbol('T', 3, 3)
    Y5 = DiscreteMarkovChain('Y', trans_probs=TS1)
    assert Y5.limiting_distribution(w, TO4).doit() == True
    assert Y5.stationary_distribution(condition_set=True).subs(TS1, TO4).contains(w).doit() == S.true
    TO6 = Matrix([[S.One, 0, 0, 0, 0],[S.Half, 0, S.Half, 0, 0],[0, S.Half, 0, S.Half, 0], [0, 0, S.Half, 0, S.Half], [0, 0, 0, 0, 1]])
    Y6 = DiscreteMarkovChain('Y', trans_probs=TO6)
    assert Y6.fundamental_matrix() == ImmutableMatrix([[Rational(3, 2), S.One, S.Half], [S.One, S(2), S.One], [S.Half, S.One, Rational(3, 2)]])
    assert Y6.absorbing_probabilities() == ImmutableMatrix([[Rational(3, 4), Rational(1, 4)], [S.Half, S.Half], [Rational(1, 4), Rational(3, 4)]])
    # old misspelled API name is deprecated but still callable
    with warns_deprecated_sympy():
        Y6.absorbing_probabilites()
    TO7 = Matrix([[Rational(1, 2), Rational(1, 4), Rational(1, 4)], [Rational(1, 2), 0, Rational(1, 2)], [Rational(1, 4), Rational(1, 4), Rational(1, 2)]])
    Y7 = DiscreteMarkovChain('Y', trans_probs=TO7)
    assert Y7.is_absorbing_chain() == False
    assert Y7.fundamental_matrix() == ImmutableMatrix([[Rational(86, 75), Rational(1, 25), Rational(-14, 75)],
                                                      [Rational(2, 25), Rational(21, 25), Rational(2, 25)],
                                                      [Rational(-14, 75), Rational(1, 25), Rational(86, 75)]])

    # test for zero-sized matrix functionality
    X = DiscreteMarkovChain('X', trans_probs=Matrix([]))
    assert X.number_of_states == 0
    assert X.stationary_distribution() == Matrix([[]])
    assert X.communication_classes() == []
    assert X.canonical_form() == ([], Matrix([]))
    assert X.decompose() == ([], Matrix([]), Matrix([]), Matrix([]))
    assert X.is_regular() == False
    assert X.is_ergodic() == False

    # test communication_class
    # see https://drive.google.com/drive/folders/1HbxLlwwn2b3U8Lj7eb_ASIUb5vYaNIjg?usp=sharing
    # tutorial 2.pdf
    TO7 = Matrix([[0, 5, 5, 0, 0],
                  [0, 0, 0, 10, 0],
                  [5, 0, 5, 0, 0],
                  [0, 10, 0, 0, 0],
                  [0, 3, 0, 3, 4]])/10
    Y7 = DiscreteMarkovChain('Y', trans_probs=TO7)
    tuples = Y7.communication_classes()
    classes, recurrence, periods = list(zip(*tuples))
    assert classes == ([1, 3], [0, 2], [4])
    assert recurrence == (True, False, False)
    assert periods == (2, 1, 1)

    TO8 = Matrix([[0, 0, 0, 10, 0, 0],
                  [5, 0, 5, 0, 0, 0],
                  [0, 4, 0, 0, 0, 6],
                  [10, 0, 0, 0, 0, 0],
                  [0, 10, 0, 0, 0, 0],
                  [0, 0, 0, 5, 5, 0]])/10
    Y8 = DiscreteMarkovChain('Y', trans_probs=TO8)
    tuples = Y8.communication_classes()
    classes, recurrence, periods = list(zip(*tuples))
    assert classes == ([0, 3], [1, 2, 5, 4])
    assert recurrence == (True, False)
    assert periods == (2, 2)

    TO9 = Matrix([[2, 0, 0, 3, 0, 0, 3, 2, 0, 0],
                  [0, 10, 0, 0, 0, 0, 0, 0, 0, 0],
                  [0, 2, 2, 0, 0, 0, 0, 0, 3, 3],
                  [0, 0, 0, 3, 0, 0, 6, 1, 0, 0],
                  [0, 0, 0, 0, 5, 5, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0, 10, 0, 0, 0, 0],
                  [4, 0, 0, 5, 0, 0, 1, 0, 0, 0],
                  [2, 0, 0, 4, 0, 0, 2, 2, 0, 0],
                  [3, 0, 1, 0, 0, 0, 0, 0, 4, 2],
                  [0, 0, 4, 0, 0, 0, 0, 0, 3, 3]])/10
    Y9 = DiscreteMarkovChain('Y', trans_probs=TO9)
    tuples = Y9.communication_classes()
    classes, recurrence, periods = list(zip(*tuples))
    assert classes == ([0, 3, 6, 7], [1], [2, 8, 9], [5], [4])
    assert recurrence == (True, True, False, True, False)
    assert periods == (1, 1, 1, 1, 1)

    # test canonical form
    # see https://web.archive.org/web/20201230182007/https://www.dartmouth.edu/~chance/teaching_aids/books_articles/probability_book/Chapter11.pdf
    # example 11.13
    T = Matrix([[1, 0, 0, 0, 0],
                [S(1) / 2, 0, S(1) / 2, 0, 0],
                [0, S(1) / 2, 0, S(1) / 2, 0],
                [0, 0, S(1) / 2, 0, S(1) / 2],
                [0, 0, 0, 0, S(1)]])
    DW = DiscreteMarkovChain('DW', [0, 1, 2, 3, 4], T)
    states, A, B, C = DW.decompose()
    assert states == [0, 4, 1, 2, 3]
    assert A == Matrix([[1, 0], [0, 1]])
    assert B == Matrix([[S(1)/2, 0], [0, 0], [0, S(1)/2]])
    assert C == Matrix([[0, S(1)/2, 0], [S(1)/2, 0, S(1)/2], [0, S(1)/2, 0]])
    states, new_matrix = DW.canonical_form()
    assert states == [0, 4, 1, 2, 3]
    assert new_matrix == Matrix([[1, 0, 0, 0, 0],
                                 [0, 1, 0, 0, 0],
                                 [S(1)/2, 0, 0, S(1)/2, 0],
                                 [0, 0, S(1)/2, 0, S(1)/2],
                                 [0, S(1)/2, 0, S(1)/2, 0]])

    # test regular and ergodic
    # https://web.archive.org/web/20201230182007/https://www.dartmouth.edu/~chance/teaching_aids/books_articles/probability_book/Chapter11.pdf
    T = Matrix([[0, 4, 0, 0, 0],
                [1, 0, 3, 0, 0],
                [0, 2, 0, 2, 0],
                [0, 0, 3, 0, 1],
                [0, 0, 0, 4, 0]])/4
    X = DiscreteMarkovChain('X', trans_probs=T)
    assert not X.is_regular()
    assert X.is_ergodic()
    T = Matrix([[0, 1], [1, 0]])
    X = DiscreteMarkovChain('X', trans_probs=T)
    assert not X.is_regular()
    assert X.is_ergodic()
    # http://www.math.wisc.edu/~valko/courses/331/MC2.pdf
    T = Matrix([[2, 1, 1],
                [2, 0, 2],
                [1, 1, 2]])/4
    X = DiscreteMarkovChain('X', trans_probs=T)
    assert X.is_regular()
    assert X.is_ergodic()
    # https://docs.ufpr.br/~lucambio/CE222/1S2014/Kemeny-Snell1976.pdf
    T = Matrix([[1, 1], [1, 1]])/2
    X = DiscreteMarkovChain('X', trans_probs=T)
    assert X.is_regular()
    assert X.is_ergodic()

    # test is_absorbing_chain
    T = Matrix([[0, 1, 0],
                [1, 0, 0],
                [0, 0, 1]])
    X = DiscreteMarkovChain('X', trans_probs=T)
    assert not X.is_absorbing_chain()
    # https://en.wikipedia.org/wiki/Absorbing_Markov_chain
    T = Matrix([[1, 1, 0, 0],
                [0, 1, 1, 0],
                [1, 0, 0, 1],
                [0, 0, 0, 2]])/2
    X = DiscreteMarkovChain('X', trans_probs=T)
    assert X.is_absorbing_chain()
    T = Matrix([[2, 0, 0, 0, 0],
                [1, 0, 1, 0, 0],
                [0, 1, 0, 1, 0],
                [0, 0, 1, 0, 1],
                [0, 0, 0, 0, 2]])/2
    X = DiscreteMarkovChain('X', trans_probs=T)
    assert X.is_absorbing_chain()

    # test custom state space
    Y10 = DiscreteMarkovChain('Y', [1, 2, 3], TO2)
    tuples = Y10.communication_classes()
    classes, recurrence, periods = list(zip(*tuples))
    assert classes == ([1], [2, 3])
    assert recurrence == (True, False)
    assert periods == (1, 1)
    assert Y10.canonical_form() == ([1, 2, 3], TO2)
    assert Y10.decompose() == ([1, 2, 3], TO2[0:1, 0:1], TO2[1:3, 0:1], TO2[1:3, 1:3])

    # testing miscellaneous queries
    T = Matrix([[S.Half, Rational(1, 4), Rational(1, 4)],
                [Rational(1, 3), 0, Rational(2, 3)],
                [S.Half, S.Half, 0]])
    X = DiscreteMarkovChain('X', [0, 1, 2], T)
    assert P(Eq(X[1], 2) & Eq(X[2], 1) & Eq(X[3], 0),
             Eq(P(Eq(X[1], 0)), Rational(1, 4)) & Eq(P(Eq(X[1], 1)), Rational(1, 4))) == Rational(1, 12)
    assert P(Eq(X[2], 1) | Eq(X[2], 2), Eq(X[1], 1)) == Rational(2, 3)
    assert P(Eq(X[2], 1) & Eq(X[2], 2), Eq(X[1], 1)) is S.Zero
    assert P(Ne(X[2], 2), Eq(X[1], 1)) == Rational(1, 3)
    assert E(X[1]**2, Eq(X[0], 1)) == Rational(8, 3)
    assert variance(X[1], Eq(X[0], 1)) == Rational(8, 9)
    raises(ValueError, lambda: E(X[1], Eq(X[2], 1)))
    raises(ValueError, lambda: DiscreteMarkovChain('X', [0, 1], T))

    # testing miscellaneous queries with different state space
    X = DiscreteMarkovChain('X', ['A', 'B', 'C'], T)
    assert P(Eq(X[1], 2) & Eq(X[2], 1) & Eq(X[3], 0),
             Eq(P(Eq(X[1], 0)), Rational(1, 4)) & Eq(P(Eq(X[1], 1)), Rational(1, 4))) == Rational(1, 12)
    assert P(Eq(X[2], 1) | Eq(X[2], 2), Eq(X[1], 1)) == Rational(2, 3)
    assert P(Eq(X[2], 1) & Eq(X[2], 2), Eq(X[1], 1)) is S.Zero
    assert P(Ne(X[2], 2), Eq(X[1], 1)) == Rational(1, 3)
    a = X.state_space.args[0]
    c = X.state_space.args[2]
    assert (E(X[1] ** 2, Eq(X[0], 1)) - (a**2/3 + 2*c**2/3)).simplify() == 0
    assert (variance(X[1], Eq(X[0], 1)) - (2*(-a/3 + c/3)**2/3 + (2*a/3 - 2*c/3)**2/3)).simplify() == 0
    raises(ValueError, lambda: E(X[1], Eq(X[2], 1)))

    #testing queries with multiple RandomIndexedSymbols
    T = Matrix([[Rational(5, 10), Rational(3, 10), Rational(2, 10)], [Rational(2, 10), Rational(7, 10), Rational(1, 10)], [Rational(3, 10), Rational(3, 10), Rational(4, 10)]])
    Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
    assert P(Eq(Y[7], Y[5]), Eq(Y[2], 0)).round(5) == Float(0.44428, 5)
    assert P(Gt(Y[3], Y[1]), Eq(Y[0], 0)).round(2) == Float(0.36, 2)
    assert P(Le(Y[5], Y[10]), Eq(Y[4], 2)).round(6) == Float(0.583120, 6)
    # complementary events must sum to one
    assert Float(P(Eq(Y[10], Y[5]), Eq(Y[4], 1)), 14) == Float(1 - P(Ne(Y[10], Y[5]), Eq(Y[4], 1)), 14)
    assert Float(P(Gt(Y[8], Y[9]), Eq(Y[3], 2)), 14) == Float(1 - P(Le(Y[8], Y[9]), Eq(Y[3], 2)), 14)
    assert Float(P(Lt(Y[1], Y[4]), Eq(Y[0], 0)), 14) == Float(1 - P(Ge(Y[1], Y[4]), Eq(Y[0], 0)), 14)
    # queries are symmetric in how the relation is written
    assert P(Eq(Y[5], Y[10]), Eq(Y[2], 1)) == P(Eq(Y[10], Y[5]), Eq(Y[2], 1))
    assert P(Gt(Y[1], Y[2]), Eq(Y[0], 1)) == P(Lt(Y[2], Y[1]), Eq(Y[0], 1))
    assert P(Ge(Y[7], Y[6]), Eq(Y[4], 1)) == P(Le(Y[6], Y[7]), Eq(Y[4], 1))

    #test symbolic queries
    a, b, c, d = symbols('a b c d')
    T = Matrix([[Rational(1, 10), Rational(4, 10), Rational(5, 10)], [Rational(3, 10), Rational(4, 10), Rational(3, 10)], [Rational(7, 10), Rational(2, 10), Rational(1, 10)]])
    Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
    # substituting concrete values into the symbolic query must match
    # the corresponding direct numeric query
    query = P(Eq(Y[a], b), Eq(Y[c], d))
    assert query.subs({a:10, b:2, c:5, d:1}).evalf().round(4) == P(Eq(Y[10], 2), Eq(Y[5], 1)).round(4)
    assert query.subs({a:15, b:0, c:10, d:1}).evalf().round(4) == P(Eq(Y[15], 0), Eq(Y[10], 1)).round(4)
    query_gt = P(Gt(Y[a], b), Eq(Y[c], d))
    query_le = P(Le(Y[a], b), Eq(Y[c], d))
    assert query_gt.subs({a:5, b:2, c:1, d:0}).evalf() + query_le.subs({a:5, b:2, c:1, d:0}).evalf() == 1.0
    query_ge = P(Ge(Y[a], b), Eq(Y[c], d))
    query_lt = P(Lt(Y[a], b), Eq(Y[c], d))
    assert query_ge.subs({a:4, b:1, c:0, d:2}).evalf() + query_lt.subs({a:4, b:1, c:0, d:2}).evalf() == 1.0

    #test issue 20078
    assert (2*Y[1] + 3*Y[1]).simplify() == 5*Y[1]
    assert (2*Y[1] - 3*Y[1]).simplify() == -Y[1]
    assert (2*(0.25*Y[1])).simplify() == 0.5*Y[1]
    assert ((2*Y[1]) * (0.25*Y[1])).simplify() == 0.5*Y[1]**2
    assert (Y[1]**2 + Y[1]**3).simplify() == (Y[1] + 1)*Y[1]**2
||||
def test_sample_stochastic_process():
    """Samples drawn from a DiscreteMarkovChain lie in its state space."""
    if not import_module('scipy'):
        skip('SciPy Not installed. Skip sampling tests')
    import random
    random.seed(0)
    numpy = import_module('numpy')
    if numpy:
        numpy.random.seed(0)  # scipy uses numpy to sample so to set its seed

    def _check_samples(chain, count=10):
        # Draw `count` samples and verify each is a valid state.
        for _ in range(count):
            assert next(sample_stochastic_process(chain)) in chain.state_space

    float_T = Matrix([[0.5, 0.2, 0.3],[0.2, 0.5, 0.3],[0.2, 0.3, 0.5]])
    _check_samples(DiscreteMarkovChain("Y", [0, 1, 2], float_T))
    # states of mixed type are allowed
    _check_samples(DiscreteMarkovChain("Z", ['1', 1, 0], float_T))

    rational_T = Matrix([[S.Half, Rational(1, 4), Rational(1, 4)],
                         [Rational(1, 3), 0, Rational(2, 3)],
                         [S.Half, S.Half, 0]])
    _check_samples(DiscreteMarkovChain('X', [0, 1, 2], rational_T))
    # symbolic states (pi, oo) also sample correctly
    _check_samples(DiscreteMarkovChain('W', [1, pi, oo], rational_T))
||||
def test_ContinuousMarkovChain():
    """Exercise ContinuousMarkovChain generator matrices, transition
    kernels, and numeric/symbolic probability and expectation queries."""
    T1 = Matrix([[S(-2), S(2), S.Zero],
                 [S.Zero, S.NegativeOne, S.One],
                 [Rational(3, 2), Rational(3, 2), S(-3)]])
    C1 = ContinuousMarkovChain('C', [0, 1, 2], T1)
    assert C1.limiting_distribution() == ImmutableMatrix([[Rational(3, 19), Rational(12, 19), Rational(4, 19)]])

    T2 = Matrix([[-S.One, S.One, S.Zero], [S.One, -S.One, S.Zero], [S.Zero, S.One, -S.One]])
    C2 = ContinuousMarkovChain('C', [0, 1, 2], T2)
    A, t = C2.generator_matrix, symbols('t', positive=True)
    # the transition kernel is the matrix exponential of the generator
    assert C2.transition_probabilities(A)(t) == Matrix([[S.Half + exp(-2*t)/2, S.Half - exp(-2*t)/2, 0],
                                                       [S.Half - exp(-2*t)/2, S.Half + exp(-2*t)/2, 0],
                                                       [S.Half - exp(-t) + exp(-2*t)/2, S.Half - exp(-2*t)/2, exp(-t)]])
    with ignore_warnings(UserWarning): ### TODO: Restore tests once warnings are removed
        assert P(Eq(C2(1), 1), Eq(C2(0), 1), evaluate=False) == Probability(Eq(C2(1), 1), Eq(C2(0), 1))
    assert P(Eq(C2(1), 1), Eq(C2(0), 1)) == exp(-2)/2 + S.Half
    assert P(Eq(C2(1), 0) & Eq(C2(2), 1) & Eq(C2(3), 1),
             Eq(P(Eq(C2(1), 0)), S.Half)) == (Rational(1, 4) - exp(-2)/4)*(exp(-2)/2 + S.Half)
    # an event OR its complement is certain
    assert P(Not(Eq(C2(1), 0) & Eq(C2(2), 1) & Eq(C2(3), 2)) |
             (Eq(C2(1), 0) & Eq(C2(2), 1) & Eq(C2(3), 2)),
             Eq(P(Eq(C2(1), 0)), Rational(1, 4)) & Eq(P(Eq(C2(1), 1)), Rational(1, 4))) is S.One
    assert E(C2(Rational(3, 2)), Eq(C2(0), 2)) == -exp(-3)/2 + 2*exp(Rational(-3, 2)) + S.Half
    assert variance(C2(Rational(3, 2)), Eq(C2(0), 1)) == ((S.Half - exp(-3)/2)**2*(exp(-3)/2 + S.Half)
                                                          + (Rational(-1, 2) - exp(-3)/2)**2*(S.Half - exp(-3)/2))
    # conditioning on P of the same time point is rejected...
    raises(KeyError, lambda: P(Eq(C2(1), 0), Eq(P(Eq(C2(1), 1)), S.Half)))
    # ...but a condition on a later time leaves the query unevaluated
    assert P(Eq(C2(1), 0), Eq(P(Eq(C2(5), 1)), S.Half)) == Probability(Eq(C2(1), 0))
    TS1 = MatrixSymbol('G', 3, 3)
    CS1 = ContinuousMarkovChain('C', [0, 1, 2], TS1)
    A = CS1.generator_matrix
    # symbolic generator: kernel stays as a matrix exponential
    assert CS1.transition_probabilities(A)(t) == exp(t*A)

    C3 = ContinuousMarkovChain('C', [Symbol('0'), Symbol('1'), Symbol('2')], T2)
    assert P(Eq(C3(1), 1), Eq(C3(0), 1)) == exp(-2)/2 + S.Half
    assert P(Eq(C3(1), Symbol('1')), Eq(C3(0), Symbol('1'))) == exp(-2)/2 + S.Half

    #test probability queries
    G = Matrix([[-S(1), Rational(1, 10), Rational(9, 10)], [Rational(2, 5), -S(1), Rational(3, 5)], [Rational(1, 2), Rational(1, 2), -S(1)]])
    C = ContinuousMarkovChain('C', state_space=[0, 1, 2], gen_mat=G)
    assert P(Eq(C(7.385), C(3.19)), Eq(C(0.862), 0)).round(5) == Float(0.35469, 5)
    assert P(Gt(C(98.715), C(19.807)), Eq(C(11.314), 2)).round(5) == Float(0.32452, 5)
    assert P(Le(C(5.9), C(10.112)), Eq(C(4), 1)).round(6) == Float(0.675214, 6)
    # complementary events must sum to one
    assert Float(P(Eq(C(7.32), C(2.91)), Eq(C(2.63), 1)), 14) == Float(1 - P(Ne(C(7.32), C(2.91)), Eq(C(2.63), 1)), 14)
    assert Float(P(Gt(C(3.36), C(1.101)), Eq(C(0.8), 2)), 14) == Float(1 - P(Le(C(3.36), C(1.101)), Eq(C(0.8), 2)), 14)
    assert Float(P(Lt(C(4.9), C(2.79)), Eq(C(1.61), 0)), 14) == Float(1 - P(Ge(C(4.9), C(2.79)), Eq(C(1.61), 0)), 14)
    # queries are symmetric in how the relation is written
    assert P(Eq(C(5.243), C(10.912)), Eq(C(2.174), 1)) == P(Eq(C(10.912), C(5.243)), Eq(C(2.174), 1))
    assert P(Gt(C(2.344), C(9.9)), Eq(C(1.102), 1)) == P(Lt(C(9.9), C(2.344)), Eq(C(1.102), 1))
    assert P(Ge(C(7.87), C(1.008)), Eq(C(0.153), 1)) == P(Le(C(1.008), C(7.87)), Eq(C(0.153), 1))

    #test symbolic queries
    a, b, c, d = symbols('a b c d')
    query = P(Eq(C(a), b), Eq(C(c), d))
    assert query.subs({a:3.65, b:2, c:1.78, d:1}).evalf().round(10) == P(Eq(C(3.65), 2), Eq(C(1.78), 1)).round(10)
    query_gt = P(Gt(C(a), b), Eq(C(c), d))
    query_le = P(Le(C(a), b), Eq(C(c), d))
    assert query_gt.subs({a:13.2, b:0, c:3.29, d:2}).evalf() + query_le.subs({a:13.2, b:0, c:3.29, d:2}).evalf() == 1.0
    query_ge = P(Ge(C(a), b), Eq(C(c), d))
    query_lt = P(Lt(C(a), b), Eq(C(c), d))
    assert query_ge.subs({a:7.43, b:1, c:1.45, d:0}).evalf() + query_lt.subs({a:7.43, b:1, c:1.45, d:0}).evalf() == 1.0

    #test issue 20078
    assert (2*C(1) + 3*C(1)).simplify() == 5*C(1)
    assert (2*C(1) - 3*C(1)).simplify() == -C(1)
    assert (2*(0.25*C(1))).simplify() == 0.5*C(1)
    assert (2*C(1) * 0.25*C(1)).simplify() == 0.5*C(1)**2
    assert (C(1)**2 + C(1)**3).simplify() == (C(1) + 1)*C(1)**2
||||
def test_BernoulliProcess():
    """Exercise BernoulliProcess: state space, joint distributions,
    probability/expectation queries, sums of indexed RVs, and the
    independence of distinct time indices."""

    B = BernoulliProcess("B", p=0.6, success=1, failure=0)
    assert B.state_space == FiniteSet(0, 1)
    assert B.index_set == S.Naturals0
    assert B.success == 1
    assert B.failure == 0

    # non-numeric success/failure labels become free symbols in expectations
    X = BernoulliProcess("X", p=Rational(1,3), success='H', failure='T')
    assert X.state_space == FiniteSet('H', 'T')
    H, T = symbols("H,T")
    assert E(X[1]+X[2]*X[3]) == H**2/9 + 4*H*T/9 + H/3 + 4*T**2/9 + 2*T/3

    t, x = symbols('t, x', positive=True, integer=True)
    assert isinstance(B[t], RandomIndexedSymbol)

    # p must be a valid probability; call syntax and negative indices rejected
    raises(ValueError, lambda: BernoulliProcess("X", p=1.1, success=1, failure=0))
    raises(NotImplementedError, lambda: B(t))

    raises(IndexError, lambda: B[-3])
    assert B.joint_distribution(B[3], B[9]) == JointDistributionHandmade(Lambda((B[3], B[9]),
                Piecewise((0.6, Eq(B[3], 1)), (0.4, Eq(B[3], 0)), (0, True))
                *Piecewise((0.6, Eq(B[9], 1)), (0.4, Eq(B[9], 0)), (0, True))))

    assert B.joint_distribution(2, B[4]) == JointDistributionHandmade(Lambda((B[2], B[4]),
                Piecewise((0.6, Eq(B[2], 1)), (0.4, Eq(B[2], 0)), (0, True))
                *Piecewise((0.6, Eq(B[4], 1)), (0.4, Eq(B[4], 0)), (0, True))))

    # Test for the sum distribution of Bernoulli Process RVs
    Y = B[1] + B[2] + B[3]
    assert P(Eq(Y, 0)).round(2) == Float(0.06, 1)
    assert P(Eq(Y, 2)).round(2) == Float(0.43, 2)
    assert P(Eq(Y, 4)).round(2) == 0
    assert P(Gt(Y, 1)).round(2) == Float(0.65, 2)
    # Test for independency of each Random Indexed variable
    assert P(Eq(B[1], 0) & Eq(B[2], 1) & Eq(B[3], 0) & Eq(B[4], 1)).round(2) == Float(0.06, 1)

    assert E(2 * B[1] + B[2]).round(2) == Float(1.80, 3)
    assert E(2 * B[1] + B[2] + 5).round(2) == Float(6.80, 3)
    assert E(B[2] * B[4] + B[10]).round(2) == Float(0.96, 2)
    assert E(B[2] > 0, Eq(B[1],1) & Eq(B[2],1)).round(2) == Float(0.60,2)
    assert E(B[1]) == 0.6
    assert P(B[1] > 0).round(2) == Float(0.60, 2)
    assert P(B[1] < 1).round(2) == Float(0.40, 2)
    assert P(B[1] > 0, B[2] <= 1).round(2) == Float(0.60, 2)
    assert P(B[12] * B[5] > 0).round(2) == Float(0.36, 2)
    assert P(B[12] * B[5] > 0, B[4] < 1).round(2) == Float(0.36, 2)
    # degenerate conditions give probability 0 or 1
    assert P(Eq(B[2], 1), B[2] > 0) == 1.0
    assert P(Eq(B[5], 3)) == 0
    assert P(Eq(B[1], 1), B[1] < 0) == 0
    assert P(B[2] > 0, Eq(B[2], 1)) == 1
    assert P(B[2] < 0, Eq(B[2], 1)) == 0
    assert P(B[2] > 0, B[2]==7) == 0
    assert P(B[5] > 0, B[5]) == BernoulliDistribution(0.6, 0, 1)
    raises(ValueError, lambda: P(3))
    raises(ValueError, lambda: P(B[3] > 0, 3))

    # test issue 19456
    expr = Sum(B[t], (t, 0, 4))
    expr2 = Sum(B[t], (t, 1, 3))
    expr3 = Sum(B[t]**2, (t, 1, 3))
    assert expr.doit() == B[0] + B[1] + B[2] + B[3] + B[4]
    assert expr2.doit() == Y
    assert expr3.doit() == B[1]**2 + B[2]**2 + B[3]**2
    # symbolic indices also appear among the free symbols
    assert B[2*t].free_symbols == {B[2*t], t}
    assert B[4].free_symbols == {B[4]}
    assert B[x*t].free_symbols == {B[x*t], x, t}

    #test issue 20078
    assert (2*B[t] + 3*B[t]).simplify() == 5*B[t]
    assert (2*B[t] - 3*B[t]).simplify() == -B[t]
    assert (2*(0.25*B[t])).simplify() == 0.5*B[t]
    assert (2*B[t] * 0.25*B[t]).simplify() == 0.5*B[t]**2
    assert (B[t]**2 + B[t]**3).simplify() == (B[t] + 1)*B[t]**2
||||
def test_PoissonProcess():
    """Exercise PoissonProcess: distributions, interval-conditioned
    probability/expectation queries, superposition (+) and split(),
    plus textbook examples with concrete rates."""
    X = PoissonProcess("X", 3)
    assert X.state_space == S.Naturals0
    assert X.index_set == Interval(0, oo)
    assert X.lamda == 3

    t, d, x, y = symbols('t d x y', positive=True)
    assert isinstance(X(t), RandomIndexedSymbol)
    assert X.distribution(t) == PoissonDistribution(3*t)
    # passing the RandomIndexedSymbol itself is the deprecated form
    with warns_deprecated_sympy():
        X.distribution(X(t))
    raises(ValueError, lambda: PoissonProcess("X", -1))
    raises(NotImplementedError, lambda: X[t])
    raises(IndexError, lambda: X(-5))

    assert X.joint_distribution(X(2), X(3)) == JointDistributionHandmade(Lambda((X(2), X(3)),
                6**X(2)*9**X(3)*exp(-15)/(factorial(X(2))*factorial(X(3)))))

    assert X.joint_distribution(4, 6) == JointDistributionHandmade(Lambda((X(4), X(6)),
                12**X(4)*18**X(6)*exp(-30)/(factorial(X(4))*factorial(X(6)))))

    assert P(X(t) < 1) == exp(-3*t)
    assert P(Eq(X(t), 0), Contains(t, Interval.Lopen(3, 5))) == exp(-6) # exp(-2*lamda)
    res = P(Eq(X(t), 1), Contains(t, Interval.Lopen(3, 4)))
    assert res == 3*exp(-3)

    # Equivalent to P(Eq(X(t), 1))**4 because of non-overlapping intervals
    assert P(Eq(X(t), 1) & Eq(X(d), 1) & Eq(X(x), 1) & Eq(X(y), 1), Contains(t, Interval.Lopen(0, 1))
        & Contains(d, Interval.Lopen(1, 2)) & Contains(x, Interval.Lopen(2, 3))
        & Contains(y, Interval.Lopen(3, 4))) == res**4

    # Return Probability because of overlapping intervals
    assert P(Eq(X(t), 2) & Eq(X(d), 3), Contains(t, Interval.Lopen(0, 2))
        & Contains(d, Interval.Ropen(2, 4))) == \
                Probability(Eq(X(d), 3) & Eq(X(t), 2), Contains(t, Interval.Lopen(0, 2))
                & Contains(d, Interval.Ropen(2, 4)))

    raises(ValueError, lambda: P(Eq(X(t), 2) & Eq(X(d), 3),
           Contains(t, Interval.Lopen(0, 4)) & Contains(d, Interval.Lopen(3, oo)))) # no bound on d
    assert P(Eq(X(3), 2)) == 81*exp(-9)/2
    assert P(Eq(X(t), 2), Contains(t, Interval.Lopen(0, 5))) == 225*exp(-15)/2

    # Check that probability works correctly by adding it to 1
    res1 = P(X(t) <= 3, Contains(t, Interval.Lopen(0, 5)))
    res2 = P(X(t) > 3, Contains(t, Interval.Lopen(0, 5)))
    assert res1 == 691*exp(-15)
    assert (res1 + res2).simplify() == 1

    # Check Not and Or
    assert P(Not(Eq(X(t), 2) & (X(d) > 3)), Contains(t, Interval.Ropen(2, 4)) & \
            Contains(d, Interval.Lopen(7, 8))).simplify() == -18*exp(-6) + 234*exp(-9) + 1
    assert P(Eq(X(t), 2) | Ne(X(t), 4), Contains(t, Interval.Ropen(2, 4))) == 1 - 36*exp(-6)
    raises(ValueError, lambda: P(X(t) > 2, X(t) + X(d)))
    assert E(X(t)) == 3*t # property of the distribution at a given timestamp
    assert E(X(t)**2 + X(d)*2 + X(y)**3, Contains(t, Interval.Lopen(0, 1))
        & Contains(d, Interval.Lopen(1, 2)) & Contains(y, Interval.Ropen(3, 4))) == 75
    assert E(X(t)**2, Contains(t, Interval.Lopen(0, 1))) == 12
    # overlapping intervals -> expectation stays unevaluated
    assert E(x*(X(t) + X(d))*(X(t)**2+X(d)**2), Contains(t, Interval.Lopen(0, 1))
        & Contains(d, Interval.Ropen(1, 2))) == \
            Expectation(x*(X(d) + X(t))*(X(d)**2 + X(t)**2), Contains(t, Interval.Lopen(0, 1))
            & Contains(d, Interval.Ropen(1, 2)))

    # Value Error because of infinite time bound
    raises(ValueError, lambda: E(X(t)**3, Contains(t, Interval.Lopen(1, oo))))

    # Equivalent to E(X(t)**2) - E(X(d)**2) == E(X(1)**2) - E(X(1)**2) == 0
    assert E((X(t) + X(d))*(X(t) - X(d)), Contains(t, Interval.Lopen(0, 1))
        & Contains(d, Interval.Lopen(1, 2))) == 0
    assert E(X(2) + x*E(X(5))) == 15*x + 6
    assert E(x*X(1) + y) == 3*x + y
    assert P(Eq(X(1), 2) & Eq(X(t), 3), Contains(t, Interval.Lopen(1, 2))) == 81*exp(-6)/4
    Y = PoissonProcess("Y", 6)
    Z = X + Y
    # superposition of independent Poisson processes adds the rates
    assert Z.lamda == X.lamda + Y.lamda == 9
    raises(ValueError, lambda: X + 5) # should be added be only PoissonProcess instance
    N, M = Z.split(4, 5)
    assert N.lamda == 4
    assert M.lamda == 5
    raises(ValueError, lambda: Z.split(3, 2)) # 2+3 != 9

    raises(ValueError, lambda :P(Eq(X(t), 0), Contains(t, Interval.Lopen(1, 3)) & Eq(X(1), 0)))
    # check if it handles queries with two random variables in one args
    res1 = P(Eq(N(3), N(5)))
    assert res1 == P(Eq(N(t), 0), Contains(t, Interval(3, 5)))
    res2 = P(N(3) > N(1))
    assert res2 == P((N(t) > 0), Contains(t, Interval(1, 3)))
    assert P(N(3) < N(1)) == 0 # condition is not possible
    res3 = P(N(3) <= N(1)) # holds only for Eq(N(3), N(1))
    assert res3 == P(Eq(N(t), 0), Contains(t, Interval(1, 3)))

    # tests from https://www.probabilitycourse.com/chapter11/11_1_2_basic_concepts_of_the_poisson_process.php
    X = PoissonProcess('X', 10) # 11.1
    assert P(Eq(X(S(1)/3), 3) & Eq(X(1), 10)) == exp(-10)*Rational(8000000000, 11160261)
    assert P(Eq(X(1), 1), Eq(X(S(1)/3), 3)) == 0
    assert P(Eq(X(1), 10), Eq(X(S(1)/3), 3)) == P(Eq(X(S(2)/3), 7))

    X = PoissonProcess('X', 2) # 11.2
    assert P(X(S(1)/2) < 1) == exp(-1)
    assert P(X(3) < 1, Eq(X(1), 0)) == exp(-4)
    assert P(Eq(X(4), 3), Eq(X(2), 3)) == exp(-4)

    X = PoissonProcess('X', 3)
    assert P(Eq(X(2), 5) & Eq(X(1), 2)) == Rational(81, 4)*exp(-6)

    # check few properties
    assert P(X(2) <= 3, X(1)>=1) == 3*P(Eq(X(1), 0)) + 2*P(Eq(X(1), 1)) + P(Eq(X(1), 2))
    assert P(X(2) <= 3, X(1) > 1) == 2*P(Eq(X(1), 0)) + 1*P(Eq(X(1), 1))
    # independent increments: a joint probability factors over intervals
    assert P(Eq(X(2), 5) & Eq(X(1), 2)) == P(Eq(X(1), 3))*P(Eq(X(1), 2))
    assert P(Eq(X(3), 4), Eq(X(1), 3)) == P(Eq(X(2), 1))

    #test issue 20078
    assert (2*X(t) + 3*X(t)).simplify() == 5*X(t)
    assert (2*X(t) - 3*X(t)).simplify() == -X(t)
    assert (2*(0.25*X(t))).simplify() == 0.5*X(t)
    assert (2*X(t) * 0.25*X(t)).simplify() == 0.5*X(t)**2
    assert (X(t)**2 + X(t)**3).simplify() == (X(t) + 1)*X(t)**2
||||
def test_WienerProcess():
    """Test basic properties, distributions and probability queries of the
    Wiener process (standard Brownian motion): state space, index set,
    marginal/joint distributions, interval-conditioned probabilities and
    expectations, and simplification of linear combinations (issue 20078)."""
    X = WienerProcess("X")
    assert X.state_space == S.Reals
    assert X.index_set == Interval(0, oo)

    t, d, x, y = symbols('t d x y', positive=True)
    assert isinstance(X(t), RandomIndexedSymbol)
    # Marginal at time t is N(0, sqrt(t)).
    assert X.distribution(t) == NormalDistribution(0, sqrt(t))
    with warns_deprecated_sympy():
        # Passing a RandomIndexedSymbol instead of a time is deprecated.
        X.distribution(X(t))
    raises(ValueError, lambda: PoissonProcess("X", -1))
    raises(NotImplementedError, lambda: X[t])
    raises(IndexError, lambda: X(-2))  # negative time is outside the index set

    # Joint density factorises over independent timestamps.
    assert X.joint_distribution(X(2), X(3)) == JointDistributionHandmade(
        Lambda((X(2), X(3)), sqrt(6)*exp(-X(2)**2/4)*exp(-X(3)**2/6)/(12*pi)))
    assert X.joint_distribution(4, 6) == JointDistributionHandmade(
        Lambda((X(4), X(6)), sqrt(6)*exp(-X(4)**2/8)*exp(-X(6)**2/12)/(24*pi)))

    assert P(X(t) < 3).simplify() == erf(3*sqrt(2)/(2*sqrt(t)))/2 + S(1)/2
    assert P(X(t) > 2, Contains(t, Interval.Lopen(3, 7))).simplify() == S(1)/2 -\
        erf(sqrt(2)/2)/2

    # Equivalent to P(X(1)>1)**4
    assert P((X(t) > 4) & (X(d) > 3) & (X(x) > 2) & (X(y) > 1),
        Contains(t, Interval.Lopen(0, 1)) & Contains(d, Interval.Lopen(1, 2))
        & Contains(x, Interval.Lopen(2, 3)) & Contains(y, Interval.Lopen(3, 4))).simplify() ==\
        (1 - erf(sqrt(2)/2))*(1 - erf(sqrt(2)))*(1 - erf(3*sqrt(2)/2))*(1 - erf(2*sqrt(2)))/16

    # Contains an overlapping interval so, return Probability
    assert P((X(t) < 2) & (X(d) > 3), Contains(t, Interval.Lopen(0, 2))
        & Contains(d, Interval.Ropen(2, 4))) == Probability((X(d) > 3) & (X(t) < 2),
            Contains(d, Interval.Ropen(2, 4)) & Contains(t, Interval.Lopen(0, 2)))

    assert str(P(Not((X(t) < 5) & (X(d) > 3)), Contains(t, Interval.Ropen(2, 4)) &
        Contains(d, Interval.Lopen(7, 8))).simplify()) == \
        '-(1 - erf(3*sqrt(2)/2))*(2 - erfc(5/2))/4 + 1'
    # Distribution has mean 0 at each timestamp
    assert E(X(t)) == 0
    assert E(x*(X(t) + X(d))*(X(t)**2 + X(d)**2), Contains(t, Interval.Lopen(0, 1))
        & Contains(d, Interval.Ropen(1, 2))) == Expectation(x*(X(d) + X(t))*(X(d)**2 + X(t)**2),
            Contains(d, Interval.Ropen(1, 2)) & Contains(t, Interval.Lopen(0, 1)))
    assert E(X(t) + x*E(X(3))) == 0

    # test issue 20078: arithmetic on RandomIndexedSymbols simplifies correctly
    assert (2*X(t) + 3*X(t)).simplify() == 5*X(t)
    assert (2*X(t) - 3*X(t)).simplify() == -X(t)
    assert (2*(0.25*X(t))).simplify() == 0.5*X(t)
    assert (2*X(t) * 0.25*X(t)).simplify() == 0.5*X(t)**2
    assert (X(t)**2 + X(t)**3).simplify() == (X(t) + 1)*X(t)**2
|
||||
|
||||
|
||||
def test_GammaProcess_symbolic():
    """Test the Gamma process with symbolic rate `l` and shape `g`:
    distributions, joint density, moments, interval-conditioned expectation
    and probability, and simplification of linear combinations (issue 20078)."""
    t, d, x, y, g, l = symbols('t d x y g l', positive=True)
    X = GammaProcess("X", l, g)

    raises(NotImplementedError, lambda: X[t])
    raises(IndexError, lambda: X(-1))  # negative time is outside the index set
    assert isinstance(X(t), RandomIndexedSymbol)
    assert X.state_space == Interval(0, oo)
    # Marginal at time t is Gamma(g*t, 1/l).
    assert X.distribution(t) == GammaDistribution(g*t, 1/l)
    with warns_deprecated_sympy():
        # Passing a RandomIndexedSymbol instead of a time is deprecated.
        X.distribution(X(t))
    assert X.joint_distribution(5, X(3)) == JointDistributionHandmade(Lambda(
        (X(5), X(3)), l**(8*g)*exp(-l*X(3))*exp(-l*X(5))*X(3)**(3*g - 1)*X(5)**(5*g
        - 1)/(gamma(3*g)*gamma(5*g))))
    # property of the gamma process at any given timestamp
    assert E(X(t)) == g*t/l
    assert variance(X(t)).simplify() == g*t/l**2

    # Equivalent to E(2*X(1)) + E(X(1)**2) + E(X(1)**3), where E(X(1)) == g/l
    assert E(X(t)**2 + X(d)*2 + X(y)**3, Contains(t, Interval.Lopen(0, 1))
        & Contains(d, Interval.Lopen(1, 2)) & Contains(y, Interval.Ropen(3, 4))) == \
        2*g/l + (g**2 + g)/l**2 + (g**3 + 3*g**2 + 2*g)/l**3

    assert P(X(t) > 3, Contains(t, Interval.Lopen(3, 4))).simplify() == \
        1 - lowergamma(g, 3*l)/gamma(g)  # equivalent to P(X(1)>3)

    # test issue 20078: arithmetic on RandomIndexedSymbols simplifies correctly
    assert (2*X(t) + 3*X(t)).simplify() == 5*X(t)
    assert (2*X(t) - 3*X(t)).simplify() == -X(t)
    assert (2*(0.25*X(t))).simplify() == 0.5*X(t)
    assert (2*X(t) * 0.25*X(t)).simplify() == 0.5*X(t)**2
    assert (X(t)**2 + X(t)**3).simplify() == (X(t) + 1)*X(t)**2
|
||||
def test_GammaProcess_numeric():
|
||||
t, d, x, y = symbols('t d x y', positive=True)
|
||||
X = GammaProcess("X", 1, 2)
|
||||
assert X.state_space == Interval(0, oo)
|
||||
assert X.index_set == Interval(0, oo)
|
||||
assert X.lamda == 1
|
||||
assert X.gamma == 2
|
||||
|
||||
raises(ValueError, lambda: GammaProcess("X", -1, 2))
|
||||
raises(ValueError, lambda: GammaProcess("X", 0, -2))
|
||||
raises(ValueError, lambda: GammaProcess("X", -1, -2))
|
||||
|
||||
# all are independent because of non-overlapping intervals
|
||||
assert P((X(t) > 4) & (X(d) > 3) & (X(x) > 2) & (X(y) > 1), Contains(t,
|
||||
Interval.Lopen(0, 1)) & Contains(d, Interval.Lopen(1, 2)) & Contains(x,
|
||||
Interval.Lopen(2, 3)) & Contains(y, Interval.Lopen(3, 4))).simplify() == \
|
||||
120*exp(-10)
|
||||
|
||||
# Check working with Not and Or
|
||||
assert P(Not((X(t) < 5) & (X(d) > 3)), Contains(t, Interval.Ropen(2, 4)) &
|
||||
Contains(d, Interval.Lopen(7, 8))).simplify() == -4*exp(-3) + 472*exp(-8)/3 + 1
|
||||
assert P((X(t) > 2) | (X(t) < 4), Contains(t, Interval.Ropen(1, 4))).simplify() == \
|
||||
-643*exp(-4)/15 + 109*exp(-2)/15 + 1
|
||||
|
||||
assert E(X(t)) == 2*t # E(X(t)) == gamma*t/l
|
||||
assert E(X(2) + x*E(X(5))) == 10*x + 4
|
||||
@@ -0,0 +1,172 @@
|
||||
from sympy.stats import Expectation, Normal, Variance, Covariance
|
||||
from sympy.testing.pytest import raises
|
||||
from sympy.core.symbol import symbols
|
||||
from sympy.matrices.exceptions import ShapeError
|
||||
from sympy.matrices.dense import Matrix
|
||||
from sympy.matrices.expressions.matexpr import MatrixSymbol
|
||||
from sympy.matrices.expressions.special import ZeroMatrix
|
||||
from sympy.stats.rv import RandomMatrixSymbol
|
||||
from sympy.stats.symbolic_multivariate_probability import (ExpectationMatrix,
|
||||
VarianceMatrix, CrossCovarianceMatrix)
|
||||
|
||||
# Shared fixtures for the multivariate (matrix) probability tests below:
# symbolic dimensions, deterministic matrix symbols, random matrix symbols,
# and a concrete matrix `m1` mixing constants with scalar random variables.
j, k = symbols("j,k")

# Deterministic k x k matrices.
A = MatrixSymbol("A", k, k)
B = MatrixSymbol("B", k, k)
C = MatrixSymbol("C", k, k)
D = MatrixSymbol("D", k, k)

# Deterministic column vectors.
a = MatrixSymbol("a", k, 1)
b = MatrixSymbol("b", k, 1)

# Fixed-size 2 x 2 deterministic matrices.
A2 = MatrixSymbol("A2", 2, 2)
B2 = MatrixSymbol("B2", 2, 2)

# Random column vectors.
X = RandomMatrixSymbol("X", k, 1)
Y = RandomMatrixSymbol("Y", k, 1)
Z = RandomMatrixSymbol("Z", k, 1)
W = RandomMatrixSymbol("W", k, 1)

# Random square matrix.
R = RandomMatrixSymbol("R", k, k)

# Fixed-size random column vector.
X2 = RandomMatrixSymbol("X2", 2, 1)

normal = Normal("normal", 0, 1)

# Explicit matrix whose entries mix constants and scalar random variables.
m1 = Matrix([
    [1, j*Normal("normal2", 2, 1)],
    [normal, 0]
])
|
||||
|
||||
def test_multivariate_expectation():
    """Test ExpectationMatrix: construction, shape, linearity under expand(),
    behavior with matrices containing random entries, and doit() evaluation."""
    # Expectation of a deterministic vector is the vector itself.
    expr = Expectation(a)
    assert expr == Expectation(a) == ExpectationMatrix(a)
    assert expr.expand() == a

    expr = Expectation(X)
    assert expr == Expectation(X) == ExpectationMatrix(X)
    assert expr.shape == (k, 1)
    assert expr.rows == k
    assert expr.cols == 1
    assert isinstance(expr, ExpectationMatrix)

    # Linearity: E[AX + b] == A E[X] + b.
    expr = Expectation(A*X + b)
    assert expr == ExpectationMatrix(A*X + b)
    assert expr.expand() == A*ExpectationMatrix(X) + b
    assert isinstance(expr, ExpectationMatrix)
    assert expr.shape == (k, 1)

    # m1 has random entries, so it cannot be pulled out of the expectation.
    expr = Expectation(m1*X2)
    assert expr.expand() == expr

    # Only the leading deterministic factor A2 can be pulled out.
    expr = Expectation(A2*m1*B2*X2)
    assert expr.args[0].args == (A2, m1, B2, X2)
    assert expr.expand() == A2*ExpectationMatrix(m1*B2*X2)

    # Distributes over a matrix product of sums.
    expr = Expectation((X + Y)*(X - Y).T)
    assert expr.expand() == ExpectationMatrix(X*X.T) - ExpectationMatrix(X*Y.T) +\
        ExpectationMatrix(Y*X.T) - ExpectationMatrix(Y*Y.T)

    expr = Expectation(A*X + B*Y)
    assert expr.expand() == A*ExpectationMatrix(X) + B*ExpectationMatrix(Y)

    assert Expectation(m1).doit() == Matrix([[1, 2*j], [0, 0]])

    x1 = Matrix([
        [Normal('N11', 11, 1), Normal('N12', 12, 1)],
        [Normal('N21', 21, 1), Normal('N22', 22, 1)]
    ])
    x2 = Matrix([
        [Normal('M11', 1, 1), Normal('M12', 2, 1)],
        [Normal('M21', 3, 1), Normal('M22', 4, 1)]
    ])

    # deep=False evaluates only the outer expectation.
    assert Expectation(Expectation(x1 + x2)).doit(deep=False) == ExpectationMatrix(x1 + x2)
    assert Expectation(Expectation(x1 + x2)).doit() == Matrix([[12, 14], [24, 26]])
|
||||
|
||||
|
||||
def test_multivariate_variance():
    """Test VarianceMatrix: shape checks, zero variance of deterministic
    vectors, the Var(AX) = A Var(X) A.T rule, and cross terms for sums."""
    # Variance of a non-vector matrix is not defined.
    raises(ShapeError, lambda: Variance(A))

    # Deterministic vectors have zero variance.
    expr = Variance(a)
    assert expr == Variance(a) == VarianceMatrix(a)
    assert expr.expand() == ZeroMatrix(k, k)
    expr = Variance(a.T)
    assert expr == Variance(a.T) == VarianceMatrix(a.T)
    assert expr.expand() == ZeroMatrix(k, k)

    expr = Variance(X)
    assert expr == Variance(X) == VarianceMatrix(X)
    assert expr.shape == (k, k)
    assert expr.rows == k
    assert expr.cols == k
    assert isinstance(expr, VarianceMatrix)

    # Var(AX) == A Var(X) A.T
    expr = Variance(A*X)
    assert expr == VarianceMatrix(A*X)
    assert expr.expand() == A*VarianceMatrix(X)*A.T
    assert isinstance(expr, VarianceMatrix)
    assert expr.shape == (k, k)

    expr = Variance(A*B*X)
    assert expr.expand() == A*B*VarianceMatrix(X)*B.T*A.T

    # m1 has random entries, so the product cannot be expanded.
    expr = Variance(m1*X2)
    assert expr.expand() == expr

    expr = Variance(A2*m1*B2*X2)
    assert expr.args[0].args == (A2, m1, B2, X2)
    assert expr.expand() == expr

    # Var(AX + BY) includes the cross-covariance term.
    expr = Variance(A*X + B*Y)
    assert expr.expand() == 2*A*CrossCovarianceMatrix(X, Y)*B.T +\
        A*VarianceMatrix(X)*A.T + B*VarianceMatrix(Y)*B.T
|
||||
|
||||
def test_multivariate_crosscovariance():
    """Test CrossCovarianceMatrix: shape validation, zero covariance with
    deterministic vectors, Cov(X, X) = Var(X), and bilinearity rules
    Cov(AX + a, BY + b) = A Cov(X, Y) B.T."""
    # Operands must be compatible column vectors.
    raises(ShapeError, lambda: Covariance(X, Y.T))
    raises(ShapeError, lambda: Covariance(X, A))

    expr = Covariance(a.T, b.T)
    assert expr.shape == (1, 1)
    assert expr.expand() == ZeroMatrix(1, 1)

    # Deterministic vectors have zero covariance.
    expr = Covariance(a, b)
    assert expr == Covariance(a, b) == CrossCovarianceMatrix(a, b)
    assert expr.expand() == ZeroMatrix(k, k)
    assert expr.shape == (k, k)
    assert expr.rows == k
    assert expr.cols == k
    assert isinstance(expr, CrossCovarianceMatrix)

    expr = Covariance(A*X + a, b)
    assert expr.expand() == ZeroMatrix(k, k)

    expr = Covariance(X, Y)
    assert isinstance(expr, CrossCovarianceMatrix)
    assert expr.expand() == expr

    # Cov(X, X) reduces to Var(X).
    expr = Covariance(X, X)
    assert isinstance(expr, CrossCovarianceMatrix)
    assert expr.expand() == VarianceMatrix(X)

    # Additivity in the first argument.
    expr = Covariance(X + Y, Z)
    assert isinstance(expr, CrossCovarianceMatrix)
    assert expr.expand() == CrossCovarianceMatrix(X, Z) + CrossCovarianceMatrix(Y, Z)

    # Deterministic factors are pulled out: left as-is, right transposed.
    expr = Covariance(A*X, Y)
    assert isinstance(expr, CrossCovarianceMatrix)
    assert expr.expand() == A*CrossCovarianceMatrix(X, Y)

    expr = Covariance(X, B*Y)
    assert isinstance(expr, CrossCovarianceMatrix)
    assert expr.expand() == CrossCovarianceMatrix(X, Y)*B.T

    # Constant shifts drop out: Cov(AX + a, B.T Y + b) == A Cov(X, Y) B.
    expr = Covariance(A*X + a, B.T*Y + b)
    assert isinstance(expr, CrossCovarianceMatrix)
    assert expr.expand() == A*CrossCovarianceMatrix(X, Y)*B

    # Full bilinear expansion.
    expr = Covariance(A*X + B*Y + a, C.T*Z + D.T*W + b)
    assert isinstance(expr, CrossCovarianceMatrix)
    assert expr.expand() == A*CrossCovarianceMatrix(X, W)*D + A*CrossCovarianceMatrix(X, Z)*C \
        + B*CrossCovarianceMatrix(Y, W)*D + B*CrossCovarianceMatrix(Y, Z)*C
|
||||
@@ -0,0 +1,175 @@
|
||||
from sympy.concrete.summations import Sum
|
||||
from sympy.core.mul import Mul
|
||||
from sympy.core.numbers import (oo, pi)
|
||||
from sympy.core.relational import Eq
|
||||
from sympy.core.symbol import (Dummy, symbols)
|
||||
from sympy.functions.elementary.exponential import exp
|
||||
from sympy.functions.elementary.miscellaneous import sqrt
|
||||
from sympy.functions.elementary.trigonometric import sin
|
||||
from sympy.integrals.integrals import Integral
|
||||
from sympy.core.expr import unchanged
|
||||
from sympy.stats import (Normal, Poisson, variance, Covariance, Variance,
|
||||
Probability, Expectation, Moment, CentralMoment)
|
||||
from sympy.stats.rv import probability, expectation
|
||||
|
||||
|
||||
def test_literal_probability():
    """Test the unevaluated Probability/Expectation/Variance/Covariance
    symbolic classes: integral evaluation, expand() linearity rules, and
    behavior with non-random symbols and nonlinear arguments."""
    X = Normal('X', 2, 3)
    Y = Normal('Y', 3, 4)
    Z = Poisson('Z', 4)
    W = Poisson('W', 3)
    x = symbols('x', real=True)
    y, w, z = symbols('y, w, z')

    # Unevaluated Probability matches the direct computation.
    assert Probability(X > 0).evaluate_integral() == probability(X > 0)
    assert Probability(X > x).evaluate_integral() == probability(X > x)
    assert Probability(X > 0).rewrite(Integral).doit() == probability(X > 0)
    assert Probability(X > x).rewrite(Integral).doit() == probability(X > x)

    assert Expectation(X).evaluate_integral() == expectation(X)
    assert Expectation(X).rewrite(Integral).doit() == expectation(X)
    assert Expectation(X**2).evaluate_integral() == expectation(X**2)
    # expand() pulls out non-random factors and distributes over sums.
    assert Expectation(x*X).args == (x*X,)
    assert Expectation(x*X).expand() == x*Expectation(X)
    assert Expectation(2*X + 3*Y + z*X*Y).expand() == 2*Expectation(X) + 3*Expectation(Y) + z*Expectation(X*Y)
    assert Expectation(2*X + 3*Y + z*X*Y).args == (2*X + 3*Y + z*X*Y,)
    # Nonlinear functions of random variables stay inside the expectation.
    assert Expectation(sin(X)) == Expectation(sin(X)).expand()
    assert Expectation(2*x*sin(X)*Y + y*X**2 + z*X*Y).expand() == 2*x*Expectation(sin(X)*Y) \
        + y*Expectation(X**2) + z*Expectation(X*Y)
    assert Expectation(X + Y).expand() == Expectation(X) + Expectation(Y)
    assert Expectation((X + Y)*(X - Y)).expand() == Expectation(X**2) - Expectation(Y**2)
    assert Expectation((X + Y)*(X - Y)).expand().doit() == -12
    assert Expectation(X + Y, evaluate=True).doit() == 5
    # Nested expectations and deep=False evaluation.
    assert Expectation(X + Expectation(Y)).doit() == 5
    assert Expectation(X + Expectation(Y)).doit(deep=False) == 2 + Expectation(Expectation(Y))
    assert Expectation(X + Expectation(Y + Expectation(2*X))).doit(deep=False) == 2 \
        + Expectation(Expectation(Y + Expectation(2*X)))
    assert Expectation(X + Expectation(Y + Expectation(2*X))).doit() == 9
    assert Expectation(Expectation(2*X)).doit() == 4
    assert Expectation(Expectation(2*X)).doit(deep=False) == Expectation(2*X)
    assert Expectation(4*Expectation(2*X)).doit(deep=False) == 4*Expectation(2*X)
    # Polynomial expansion inside expand().
    assert Expectation((X + Y)**3).expand() == 3*Expectation(X*Y**2) +\
        3*Expectation(X**2*Y) + Expectation(X**3) + Expectation(Y**3)
    assert Expectation((X - Y)**3).expand() == 3*Expectation(X*Y**2) -\
        3*Expectation(X**2*Y) + Expectation(X**3) - Expectation(Y**3)
    assert Expectation((X - Y)**2).expand() == -2*Expectation(X*Y) +\
        Expectation(X**2) + Expectation(Y**2)

    # Variance of a non-random symbol is zero.
    assert Variance(w).args == (w,)
    assert Variance(w).expand() == 0
    assert Variance(X).evaluate_integral() == Variance(X).rewrite(Integral).doit() == variance(X)
    # Constant shifts drop; scalar factors come out squared.
    assert Variance(X + z).args == (X + z,)
    assert Variance(X + z).expand() == Variance(X)
    assert Variance(X*Y).args == (Mul(X, Y),)
    assert type(Variance(X*Y)) == Variance
    assert Variance(z*X).expand() == z**2*Variance(X)
    # Variance of sums brings in pairwise covariances.
    assert Variance(X + Y).expand() == Variance(X) + Variance(Y) + 2*Covariance(X, Y)
    assert Variance(X + Y + Z + W).expand() == (Variance(X) + Variance(Y) + Variance(Z) + Variance(W) +
                                    2 * Covariance(X, Y) + 2 * Covariance(X, Z) + 2 * Covariance(X, W) +
                                    2 * Covariance(Y, Z) + 2 * Covariance(Y, W) + 2 * Covariance(W, Z))
    assert Variance(X**2).evaluate_integral() == variance(X**2)
    assert unchanged(Variance, X**2)
    assert Variance(x*X**2).expand() == x**2*Variance(X**2)
    assert Variance(sin(X)).args == (sin(X),)
    assert Variance(sin(X)).expand() == Variance(sin(X))
    assert Variance(x*sin(X)).expand() == x**2*Variance(sin(X))

    # Covariance with a non-random symbol is zero.
    assert Covariance(w, z).args == (w, z)
    assert Covariance(w, z).expand() == 0
    assert Covariance(X, w).expand() == 0
    assert Covariance(w, X).expand() == 0
    assert Covariance(X, Y).args == (X, Y)
    assert type(Covariance(X, Y)) == Covariance
    # Bilinearity: shifts drop, scalar factors come out.
    assert Covariance(z*X + 3, Y).expand() == z*Covariance(X, Y)
    assert Covariance(X, X).args == (X, X)
    assert Covariance(X, X).expand() == Variance(X)
    assert Covariance(z*X + 3, w*Y + 4).expand() == w*z*Covariance(X,Y)
    # Covariance is symmetric (arguments are canonically ordered).
    assert Covariance(X, Y) == Covariance(Y, X)
    assert Covariance(X + Y, Z + W).expand() == Covariance(W, X) + Covariance(W, Y) + Covariance(X, Z) + Covariance(Y, Z)
    assert Covariance(x*X + y*Y, z*Z + w*W).expand() == (x*w*Covariance(W, X) + w*y*Covariance(W, Y) +
                                                         x*z*Covariance(X, Z) + y*z*Covariance(Y, Z))
    assert Covariance(x*X**2 + y*sin(Y), z*Y*Z**2 + w*W).expand() == (w*x*Covariance(W, X**2) + w*y*Covariance(sin(Y), W) +
                                                                      x*z*Covariance(Y*Z**2, X**2) + y*z*Covariance(Y*Z**2, sin(Y)))
    assert Covariance(X, X**2).expand() == Covariance(X, X**2)
    assert Covariance(X, sin(X)).expand() == Covariance(sin(X), X)
    assert Covariance(X**2, sin(X)*Y).expand() == Covariance(sin(X)*Y, X**2)
    assert Covariance(w, X).evaluate_integral() == 0
|
||||
|
||||
|
||||
def test_probability_rewrite():
    """Test rewriting between symbolic probability classes: Variance and
    Covariance in terms of Expectation, and Expectation/Variance/Covariance
    in terms of Probability (Integral for continuous, Sum for discrete)."""
    X = Normal('X', 2, 3)
    Y = Normal('Y', 3, 4)
    Z = Poisson('Z', 4)
    W = Poisson('W', 3)
    x, y, w, z = symbols('x, y, w, z')

    # Var(X) == E[X**2] - E[X]**2, also under a condition.
    assert Variance(w).rewrite(Expectation) == 0
    assert Variance(X).rewrite(Expectation) == Expectation(X ** 2) - Expectation(X) ** 2
    assert Variance(X, condition=Y).rewrite(Expectation) == Expectation(X ** 2, Y) - Expectation(X, Y) ** 2
    assert Variance(X, Y) != Expectation(X**2) - Expectation(X)**2
    assert Variance(X + z).rewrite(Expectation) == Expectation((X + z) ** 2) - Expectation(X + z) ** 2
    assert Variance(X * Y).rewrite(Expectation) == Expectation(X ** 2 * Y ** 2) - Expectation(X * Y) ** 2

    # Cov(X, Y) == E[XY] - E[X] E[Y], also under a condition.
    assert Covariance(w, X).rewrite(Expectation) == -w*Expectation(X) + Expectation(w*X)
    assert Covariance(X, Y).rewrite(Expectation) == Expectation(X*Y) - Expectation(X)*Expectation(Y)
    assert Covariance(X, Y, condition=W).rewrite(Expectation) == Expectation(X * Y, W) - Expectation(X, W) * Expectation(Y, W)

    w, x, z = symbols("W, x, z")
    px = Probability(Eq(X, x))
    pz = Probability(Eq(Z, z))

    # Continuous RVs rewrite to Integral; discrete RVs to Sum.
    assert Expectation(X).rewrite(Probability) == Integral(x*px, (x, -oo, oo))
    assert Expectation(Z).rewrite(Probability) == Sum(z*pz, (z, 0, oo))
    assert Variance(X).rewrite(Probability) == Integral(x**2*px, (x, -oo, oo)) - Integral(x*px, (x, -oo, oo))**2
    assert Variance(Z).rewrite(Probability) == Sum(z**2*pz, (z, 0, oo)) - Sum(z*pz, (z, 0, oo))**2
    assert Covariance(w, X).rewrite(Probability) == \
        -w*Integral(x*Probability(Eq(X, x)), (x, -oo, oo)) + Integral(w*x*Probability(Eq(X, x)), (x, -oo, oo))

    # To test rewrite as sum function
    assert Variance(X).rewrite(Sum) == Variance(X).rewrite(Integral)
    assert Expectation(X).rewrite(Sum) == Expectation(X).rewrite(Integral)

    assert Covariance(w, X).rewrite(Sum) == 0
    assert Covariance(w, X).rewrite(Integral) == 0

    assert Variance(X, condition=Y).rewrite(Probability) == Integral(x**2*Probability(Eq(X, x), Y), (x, -oo, oo)) - \
        Integral(x*Probability(Eq(X, x), Y), (x, -oo, oo))**2
|
||||
|
||||
|
||||
def test_symbolic_Moment():
    """Test the unevaluated Moment class: rewrites to Expectation,
    Probability and Integral, and doit() evaluation for a Normal RV and
    for a plain (non-random) number."""
    mu = symbols('mu', real=True)
    sigma = symbols('sigma', positive=True)
    x = symbols('x')
    X = Normal('X', mu, sigma)
    # Fourth moment about the point 2.
    M = Moment(X, 4, 2)
    assert M.rewrite(Expectation) == Expectation((X - 2)**4)
    assert M.rewrite(Probability) == Integral((x - 2)**4*Probability(Eq(X, x)),
                (x, -oo, oo))
    # The Integral rewrite uses a dummy integration variable; compare with dummy_eq.
    k = Dummy('k')
    expri = Integral(sqrt(2)*(k - 2)**4*exp(-(k - \
        mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma), (k, -oo, oo))
    assert M.rewrite(Integral).dummy_eq(expri)
    assert M.doit() == (mu**4 - 8*mu**3 + 6*mu**2*sigma**2 + \
        24*mu**2 - 24*mu*sigma**2 - 32*mu + 3*sigma**4 + 24*sigma**2 + 16)
    # Moment of a constant: just the power.
    M = Moment(2, 5)
    assert M.doit() == 2**5
|
||||
|
||||
|
||||
def test_symbolic_CentralMoment():
    """Test the unevaluated CentralMoment class: rewrites to Expectation,
    Probability and Integral, and doit() evaluation (6th central moment of
    N(mu, sigma) is 15*sigma**6)."""
    mu = symbols('mu', real=True)
    sigma = symbols('sigma', positive=True)
    x = symbols('x')
    X = Normal('X', mu, sigma)
    CM = CentralMoment(X, 6)
    assert CM.rewrite(Expectation) == Expectation((X - Expectation(X))**6)
    assert CM.rewrite(Probability) == Integral((x - Integral(x*Probability(True),
        (x, -oo, oo)))**6*Probability(Eq(X, x)), (x, -oo, oo))
    # The Integral rewrite uses a dummy integration variable; compare with dummy_eq.
    k = Dummy('k')
    expri = Integral(sqrt(2)*(k - Integral(sqrt(2)*k*exp(-(k - \
        mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma), (k, -oo, oo)))**6*exp(-(k - \
        mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma), (k, -oo, oo))
    assert CM.rewrite(Integral).dummy_eq(expri)
    assert CM.doit().simplify() == 15*sigma**6
    # Moment of a constant: just the power.
    CM = Moment(5, 5)
    assert CM.doit() == 5**5
|
||||
Reference in New Issue
Block a user