[xdoctest] No.44-47 and No.50-59 doc style #55813

Merged · 9 commits · Aug 7, 2023
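This PR converts the comment-style examples in these distribution docstrings to executable xdoctest examples: code lines gain >>> and ... prompts, expected output moves out of # comments into bare doctest output lines, stale device strings are refreshed where needed (e.g. CUDAPlace(0) becomes Place(cpu) in kl.py), and paddle.seed(2023) is inserted before sampling calls so the printed tensors are reproducible. A minimal sketch of the before/after pattern, condensed from the laplace.py hunks below:

    # Before: plain code, with the output hidden in comments and never executed.
    import paddle
    m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
    m.sample()
    # Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
    #        3.68546247)

    # After: doctest prompts whose expected output xdoctest can run and check.
    >>> import paddle
    >>> paddle.seed(2023)
    >>> m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
    >>> m.sample()
    Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
    3.68546247)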
306 changes: 153 additions & 153 deletions python/paddle/distribution/cauchy.py

Large diffs are not rendered by default.

30 changes: 15 additions & 15 deletions python/paddle/distribution/independent.py
@@ -31,21 +31,21 @@ class Independent(distribution.Distribution):

.. code-block:: python

import paddle
from paddle.distribution import independent

beta = paddle.distribution.Beta(paddle.to_tensor([0.5, 0.5]), paddle.to_tensor([0.5, 0.5]))
print(beta.batch_shape, beta.event_shape)
# (2,) ()
print(beta.log_prob(paddle.to_tensor(0.2)))
# Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [-0.22843921, -0.22843921])
reinterpreted_beta = independent.Independent(beta, 1)
print(reinterpreted_beta.batch_shape, reinterpreted_beta.event_shape)
# () (2,)
print(reinterpreted_beta.log_prob(paddle.to_tensor([0.2, 0.2])))
# Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# -0.45687842)
>>> import paddle
>>> from paddle.distribution import independent

>>> beta = paddle.distribution.Beta(paddle.to_tensor([0.5, 0.5]), paddle.to_tensor([0.5, 0.5]))
>>> print(beta.batch_shape, beta.event_shape)
(2,) ()
>>> print(beta.log_prob(paddle.to_tensor(0.2)))
Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[-0.22843921, -0.22843921])
>>> reinterpreted_beta = independent.Independent(beta, 1)
>>> print(reinterpreted_beta.batch_shape, reinterpreted_beta.event_shape)
() (2,)
>>> print(reinterpreted_beta.log_prob(paddle.to_tensor([0.2, 0.2])))
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
-0.45687842)
"""

def __init__(self, base, reinterpreted_batch_rank):
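A quick sanity check of the converted example above (an editorial sketch, not part of the diff): reinterpreting the length-2 batch dimension as an event dimension makes log_prob return the sum of the per-dimension Beta log densities.

    import paddle
    from paddle.distribution import independent

    beta = paddle.distribution.Beta(paddle.to_tensor([0.5, 0.5]), paddle.to_tensor([0.5, 0.5]))
    wrapped = independent.Independent(beta, 1)

    x = paddle.to_tensor([0.2, 0.2])
    # Summing the elementwise log densities reproduces the wrapped value:
    # -0.22843921 + -0.22843921 == -0.45687842, the docstring's output.
    print(paddle.sum(beta.log_prob(x)))
    print(wrapped.log_prob(x))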
20 changes: 10 additions & 10 deletions python/paddle/distribution/kl.py
@@ -53,14 +53,14 @@ def kl_divergence(p, q):

.. code-block:: python

import paddle
>>> import paddle

p = paddle.distribution.Beta(alpha=0.5, beta=0.5)
q = paddle.distribution.Beta(alpha=0.3, beta=0.7)
>>> p = paddle.distribution.Beta(alpha=0.5, beta=0.5)
>>> q = paddle.distribution.Beta(alpha=0.3, beta=0.7)

print(paddle.distribution.kl_divergence(p, q))
# Tensor(shape=[], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# 0.21193528)
>>> print(paddle.distribution.kl_divergence(p, q))
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
0.21193528)

"""
return _dispatch(type(p), type(q))(p, q)
@@ -82,11 +82,11 @@ def register_kl(cls_p, cls_q):
Examples:
.. code-block:: python

import paddle
>>> import paddle

@paddle.distribution.register_kl(paddle.distribution.Beta, paddle.distribution.Beta)
def kl_beta_beta():
pass # insert implementation here
>>> @paddle.distribution.register_kl(paddle.distribution.Beta, paddle.distribution.Beta)
... def kl_beta_beta():
...     pass  # insert implementation here
"""
if not issubclass(cls_p, Distribution) or not issubclass(
cls_q, Distribution
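The register_kl docstring stops at a pass body. A fuller sketch of how a registered rule participates in dispatch (hypothetical MyBeta subclass; assumes the dispatcher picks the most specific registered pair, as the _dispatch call above suggests):

    import paddle
    from paddle.distribution import Beta, kl_divergence, register_kl

    class MyBeta(Beta):
        # Hypothetical subclass, identical to Beta; it only provides a
        # distinct type for the dispatch table.
        pass

    @register_kl(MyBeta, MyBeta)
    def _kl_mybeta_mybeta(p, q):
        # Delegate to the built-in Beta-Beta rule through the public API.
        return kl_divergence(Beta(p.alpha, p.beta), Beta(q.alpha, q.beta))

    p = MyBeta(paddle.to_tensor(0.5), paddle.to_tensor(0.5))
    q = MyBeta(paddle.to_tensor(0.3), paddle.to_tensor(0.7))
    print(kl_divergence(p, q))  # dispatches to _kl_mybeta_mybeta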
94 changes: 46 additions & 48 deletions python/paddle/distribution/laplace.py
@@ -44,12 +44,12 @@ class Laplace(distribution.Distribution):
Examples:
.. code-block:: python

import paddle

m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
m.sample() # Laplace distributed with loc=0, scale=1
# Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
# 3.68546247)
>>> import paddle
>>> paddle.seed(2023)
>>> m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
>>> m.sample() # Laplace distributed with loc=0, scale=1
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
3.68546247)

"""

@@ -173,13 +173,13 @@ def log_prob(self, value):
Examples:
.. code-block:: python

import paddle
>>> import paddle

m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
value = paddle.to_tensor(0.1)
m.log_prob(value)
# Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
# -0.79314721)
>>> m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
>>> value = paddle.to_tensor(0.1)
>>> m.log_prob(value)
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
-0.79314721)

"""
loc, scale, value = self._validate_value(value)
@@ -205,12 +205,12 @@ def entropy(self):
Examples:
.. code-block:: python

import paddle
>>> import paddle

m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
m.entropy()
# Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
# 1.69314718)
>>> m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
>>> m.entropy()
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
1.69314718)
"""
return 1 + paddle.log(2 * self.scale)

@@ -236,13 +236,13 @@ def cdf(self, value):
Examples:
.. code-block:: python

import paddle
>>> import paddle

m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
value = paddle.to_tensor(0.1)
m.cdf(value)
# Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
# 0.54758132)
>>> m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
>>> value = paddle.to_tensor(0.1)
>>> m.cdf(value)
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
0.54758132)
"""
loc, scale, value = self._validate_value(value)
iterm = (
@@ -275,13 +275,12 @@ def icdf(self, value):
Examples:
.. code-block:: python

import paddle

m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
value = paddle.to_tensor(0.1)
m.icdf(value)
# Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
# -1.60943794)
>>> import paddle
>>> m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
>>> value = paddle.to_tensor(0.1)
>>> m.icdf(value)
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
-1.60943794)
"""
loc, scale, value = self._validate_value(value)
term = value - 0.5
@@ -300,12 +299,11 @@ def sample(self, shape=()):
Examples:
.. code-block:: python

import paddle

m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
m.sample() # Laplace distributed with loc=0, scale=1
# Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
# 3.68546247)
>>> import paddle
>>> m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
>>> m.sample() # Laplace distributed with loc=0, scale=1
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
3.68546247)
"""
shape = shape if isinstance(shape, tuple) else tuple(shape)
with paddle.no_grad():
@@ -323,12 +321,12 @@ def rsample(self, shape):
Examples:
.. code-block:: python

import paddle

m = paddle.distribution.Laplace(paddle.to_tensor([0.0]), paddle.to_tensor([1.0]))
m.rsample((1,)) # Laplace distributed with loc=0, scale=1
# Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
# [[0.04337667]])
>>> import paddle
>>> paddle.seed(2023)
>>> m = paddle.distribution.Laplace(paddle.to_tensor([0.0]), paddle.to_tensor([1.0]))
>>> m.rsample((1,)) # Laplace distributed with loc=0, scale=1
Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
[[0.04337667]])
"""

eps = self._get_eps()
@@ -395,13 +393,13 @@ def kl_divergence(self, other):
Examples:
.. code-block:: python

import paddle
>>> import paddle

m1 = paddle.distribution.Laplace(paddle.to_tensor([0.0]), paddle.to_tensor([1.0]))
m2 = paddle.distribution.Laplace(paddle.to_tensor([1.0]), paddle.to_tensor([0.5]))
m1.kl_divergence(m2)
# Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
# [1.04261160])
>>> m1 = paddle.distribution.Laplace(paddle.to_tensor([0.0]), paddle.to_tensor([1.0]))
>>> m2 = paddle.distribution.Laplace(paddle.to_tensor([1.0]), paddle.to_tensor([0.5]))
>>> m1.kl_divergence(m2)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[1.04261160])
"""

var_ratio = other.scale / self.scale
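The Laplace outputs above can be reproduced from the closed-form density; a sketch (assumes the instance exposes its loc and scale attributes, as the kl_divergence body above already does):

    import paddle

    m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
    x = paddle.to_tensor(0.1)

    # log p(x) = -log(2 * scale) - |x - loc| / scale
    # = -log(2) - 0.1 = -0.79314721, matching m.log_prob(x) above.
    print(-paddle.log(2 * m.scale) - paddle.abs(x - m.loc) / m.scale)

    # cdf and icdf are inverses, so icdf(cdf(0.1)) recovers 0.1.
    print(m.icdf(m.cdf(x)))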
68 changes: 38 additions & 30 deletions python/paddle/distribution/lognormal.py
@@ -49,36 +49,44 @@ class LogNormal(TransformedDistribution):
Examples:
.. code-block:: python

import paddle
from paddle.distribution import LogNormal

# Define a single scalar LogNormal distribution.
dist = LogNormal(loc=0., scale=3.)
# Define a batch of two scalar valued LogNormals.
# The underlying Normal of first has mean 1 and standard deviation 11, the underlying Normal of second 2 and 22.
dist = LogNormal(loc=[1., 2.], scale=[11., 22.])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample((3, ))

# Define a batch of two scalar valued LogNormals.
# Their underlying Normal have mean 1, but different standard deviations.
dist = LogNormal(loc=1., scale=[11., 22.])

# Complete example
value_tensor = paddle.to_tensor([0.8], dtype="float32")

lognormal_a = LogNormal([0.], [1.])
lognormal_b = LogNormal([0.5], [2.])
sample = lognormal_a.sample((2, ))
# a random tensor created by lognormal distribution with shape: [2, 1]
entropy = lognormal_a.entropy()
# [1.4189385] with shape: [1]
lp = lognormal_a.log_prob(value_tensor)
# [-0.72069150] with shape: [1]
p = lognormal_a.probs(value_tensor)
# [0.48641577] with shape: [1]
kl = lognormal_a.kl_divergence(lognormal_b)
# [0.34939718] with shape: [1]
>>> import paddle
>>> from paddle.distribution import LogNormal

>>> # Define a single scalar LogNormal distribution.
>>> dist = LogNormal(loc=0., scale=3.)
>>> # Define a batch of two scalar valued LogNormals.
>>> # The first has an underlying Normal with mean 1 and standard deviation 11; the second has mean 2 and standard deviation 22.
>>> dist = LogNormal(loc=[1., 2.], scale=[11., 22.])
>>> # Get 3 samples, returning a 3 x 2 tensor.
>>> samples = dist.sample((3, ))

>>> # Define a batch of two scalar valued LogNormals.
>>> # Their underlying Normals have mean 1, but different standard deviations.
>>> dist = LogNormal(loc=1., scale=[11., 22.])

>>> # Complete example
>>> value_tensor = paddle.to_tensor([0.8], dtype="float32")

>>> lognormal_a = LogNormal([0.], [1.])
>>> lognormal_b = LogNormal([0.5], [2.])
>>> sample = lognormal_a.sample((2, ))
>>> # a random tensor drawn from the lognormal distribution, with shape [2, 1]
>>> entropy = lognormal_a.entropy()
>>> print(entropy)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[1.41893852])
>>> lp = lognormal_a.log_prob(value_tensor)
>>> print(lp)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[-0.72069150])
>>> p = lognormal_a.probs(value_tensor)
>>> print(p)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.48641577])
>>> kl = lognormal_a.kl_divergence(lognormal_b)
>>> print(kl)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.34939718])
"""

def __init__(self, loc, scale):
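The LogNormal values above follow from the underlying Normal by a change of variables; a sketch (assumes Normal is importable from paddle.distribution alongside LogNormal):

    import paddle
    from paddle.distribution import LogNormal, Normal

    x = paddle.to_tensor([0.8])
    ln = LogNormal(loc=paddle.to_tensor([0.0]), scale=paddle.to_tensor([1.0]))
    base = Normal(loc=paddle.to_tensor([0.0]), scale=paddle.to_tensor([1.0]))

    # Change of variables: log p_LogNormal(x) = log p_Normal(log x) - log x,
    # which gives [-0.72069150], the log_prob value printed above.
    print(ln.log_prob(x))
    print(base.log_prob(paddle.log(x)) - paddle.log(x))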
23 changes: 11 additions & 12 deletions python/paddle/distribution/multinomial.py
@@ -53,18 +53,17 @@ class Multinomial(distribution.Distribution):

.. code-block:: python

import paddle

multinomial = paddle.distribution.Multinomial(10, paddle.to_tensor([0.2, 0.3, 0.5]))
print(multinomial.sample((2, 3)))
# Tensor(shape=[2, 3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
# [[[1., 4., 5.],
# [0., 2., 8.],
# [2., 4., 4.]],

# [[1., 6., 3.],
# [3., 3., 4.],
# [3., 4., 3.]]])
>>> import paddle
>>> paddle.seed(2023)
>>> multinomial = paddle.distribution.Multinomial(10, paddle.to_tensor([0.2, 0.3, 0.5]))
>>> print(multinomial.sample((2, 3)))
Tensor(shape=[2, 3, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
[[[1., 4., 5.],
[0., 2., 8.],
[2., 4., 4.]],
[[1., 6., 3.],
[3., 3., 4.],
[3., 4., 3.]]])
"""

def __init__(self, total_count, probs):
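A quick property check for the sample above (an editorial sketch, not part of the diff): each draw allocates exactly total_count trials across the categories, so the counts along the last axis always sum to 10.

    import paddle

    paddle.seed(2023)
    m = paddle.distribution.Multinomial(10, paddle.to_tensor([0.2, 0.3, 0.5]))
    draws = m.sample((2, 3))

    # Every row of the printed tensor above (e.g. [1., 4., 5.]) sums to 10.
    print(paddle.sum(draws, axis=-1))  # a [2, 3] tensor of all 10s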