
[xdoctest] No.44-47 and No.50-59 doc style #55813

Merged: 9 commits, Aug 7, 2023

Changes from 4 commits (this diff view predates the later commits, which explains the "nothing updated?" review comments below)
306 changes: 153 additions & 153 deletions python/paddle/distribution/cauchy.py

Large diffs are not rendered by default.

28 changes: 14 additions & 14 deletions python/paddle/distribution/independent.py
@@ -31,21 +31,21 @@ class Independent(distribution.Distribution):

.. code-block:: python

-import paddle
-from paddle.distribution import independent
-
-beta = paddle.distribution.Beta(paddle.to_tensor([0.5, 0.5]), paddle.to_tensor([0.5, 0.5]))
-print(beta.batch_shape, beta.event_shape)
-# (2,) ()
-print(beta.log_prob(paddle.to_tensor(0.2)))
-# Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
-# [-0.22843921, -0.22843921])
-reinterpreted_beta = independent.Independent(beta, 1)
-print(reinterpreted_beta.batch_shape, reinterpreted_beta.event_shape)
+>>> import paddle
+>>> from paddle.distribution import independent
+
+>>> beta = paddle.distribution.Beta(paddle.to_tensor([0.5, 0.5]), paddle.to_tensor([0.5, 0.5]))
+>>> print(beta.batch_shape, beta.event_shape)
+(2,) ()
+>>> print(beta.log_prob(paddle.to_tensor(0.2)))
+Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+[-0.22843921, -0.22843921])
+>>> reinterpreted_beta = independent.Independent(beta, 1)
+>>> print(reinterpreted_beta.batch_shape, reinterpreted_beta.event_shape)
+# () (2,)
Contributor: The `#` here needs to be deleted~

Member Author: Done

Member: It looks like nothing has been updated?

Member Author (@gouzil, Aug 2, 2023):
> It looks like nothing has been updated?

It was updated in commit 7fe022e (#55813).

-print(reinterpreted_beta.log_prob(paddle.to_tensor([0.2, 0.2])))
-# Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
-# -0.45687842)
+>>> print(reinterpreted_beta.log_prob(paddle.to_tensor([0.2, 0.2])))
+Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+-0.45687842)
"""

def __init__(self, base, reinterpreted_batch_rank):
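For context on the review exchange above: in the xdoctest style this PR adopts, any line following a `>>>` statement is read as that statement's expected stdout and compared verbatim when the example runs, so expected output must not keep the old `#` comment prefix. A minimal sketch of the corrected tail of the example:

    >>> print(reinterpreted_beta.batch_shape, reinterpreted_beta.event_shape)
    () (2,)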
20 changes: 10 additions & 10 deletions python/paddle/distribution/kl.py
@@ -53,14 +53,14 @@ def kl_divergence(p, q):

.. code-block:: python

-import paddle
+>>> import paddle

-p = paddle.distribution.Beta(alpha=0.5, beta=0.5)
-q = paddle.distribution.Beta(alpha=0.3, beta=0.7)
+>>> p = paddle.distribution.Beta(alpha=0.5, beta=0.5)
+>>> q = paddle.distribution.Beta(alpha=0.3, beta=0.7)

-print(paddle.distribution.kl_divergence(p, q))
-# Tensor(shape=[], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
-# 0.21193528)
+>>> print(paddle.distribution.kl_divergence(p, q))
+Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
+0.21193528)

"""
return _dispatch(type(p), type(q))(p, q)
@@ -82,11 +82,11 @@ def register_kl(cls_p, cls_q):
Examples:
.. code-block:: python

-import paddle
+>>> import paddle

-@paddle.distribution.register_kl(paddle.distribution.Beta, paddle.distribution.Beta)
-def kl_beta_beta():
-    pass # insert implementation here
+>>> @paddle.distribution.register_kl(paddle.distribution.Beta, paddle.distribution.Beta)
+>>> def kl_beta_beta():
+...     pass # insert implementation here
"""
if not issubclass(cls_p, Distribution) or not issubclass(
cls_q, Distribution
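For context on how a registration is consumed: `kl_divergence` resolves its implementation through `_dispatch(type(p), type(q))` (the return statement above), so a function registered for a type pair is picked up automatically. A hedged sketch under that assumption; `MyBeta` and the registered body are hypothetical illustrations, not anything defined in Paddle:

    >>> import paddle
    >>> from paddle.distribution import Beta

    >>> class MyBeta(Beta):  # hypothetical Distribution subclass, for illustration only
    ...     pass

    >>> @paddle.distribution.register_kl(MyBeta, MyBeta)
    ... def _kl_mybeta_mybeta(p, q):
    ...     # Illustrative body: delegate to the built-in Beta/Beta rule.
    ...     return paddle.distribution.kl_divergence(Beta(p.alpha, p.beta), Beta(q.alpha, q.beta))

    >>> # Dispatch now finds the function registered for (MyBeta, MyBeta).
    >>> kl = paddle.distribution.kl_divergence(MyBeta(0.5, 0.5), MyBeta(0.3, 0.7))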
91 changes: 44 additions & 47 deletions python/paddle/distribution/laplace.py
@@ -44,12 +44,12 @@ class Laplace(distribution.Distribution):
Examples:
.. code-block:: python

-import paddle
+>>> import paddle

-m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
-m.sample() # Laplace distributed with loc=0, scale=1
-# Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
-# 3.68546247)
+>>> m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
+>>> m.sample() # Laplace distributed with loc=0, scale=1
+Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
+3.68546247)
Contributor: Add a seed~

    >>> import paddle
    >>> paddle.seed(2023)
    >>> m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
    >>> print(m.sample())

Member Author: Done

Member: Meow meow meow? Am I looking at an old version?
"""

@@ -173,13 +173,13 @@ def log_prob(self, value):
Examples:
.. code-block:: python

-import paddle
+>>> import paddle

-m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
-value = paddle.to_tensor(0.1)
-m.log_prob(value)
-# Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
-# -0.79314721)
+>>> m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
+>>> value = paddle.to_tensor(0.1)
+>>> m.log_prob(value)
+Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
+-0.79314721)

"""
loc, scale, value = self._validate_value(value)
@@ -205,12 +205,12 @@ def entropy(self):
Examples:
.. code-block:: python

-import paddle
+>>> import paddle

-m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
-m.entropy()
-# Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
-# 1.69314718)
+>>> m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
+>>> m.entropy()
+Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
+1.69314718)
"""
return 1 + paddle.log(2 * self.scale)

@@ -236,13 +236,13 @@ def cdf(self, value):
Examples:
.. code-block:: python

-import paddle
+>>> import paddle

-m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
-value = paddle.to_tensor(0.1)
-m.cdf(value)
-# Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
-# 0.54758132)
+>>> m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
+>>> value = paddle.to_tensor(0.1)
+>>> m.cdf(value)
+Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
+0.54758132)
"""
loc, scale, value = self._validate_value(value)
iterm = (
@@ -275,13 +275,12 @@ def icdf(self, value):
Examples:
.. code-block:: python

-import paddle
-
-m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
-value = paddle.to_tensor(0.1)
-m.icdf(value)
-# Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
-# -1.60943794)
+>>> import paddle
+>>> m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
+>>> value = paddle.to_tensor(0.1)
+>>> m.icdf(value)
+Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
+-1.60943794)
"""
loc, scale, value = self._validate_value(value)
term = value - 0.5
@@ -300,12 +299,11 @@ def sample(self, shape=()):
Examples:
.. code-block:: python

-import paddle
-
-m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
-m.sample() # Laplace distributed with loc=0, scale=1
-# Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
-# 3.68546247)
+>>> import paddle
+>>> m = paddle.distribution.Laplace(paddle.to_tensor(0.0), paddle.to_tensor(1.0))
+>>> m.sample() # Laplace distributed with loc=0, scale=1
+Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
+3.68546247)
"""
shape = shape if isinstance(shape, tuple) else tuple(shape)
with paddle.no_grad():
@@ -323,12 +321,11 @@ def rsample(self, shape):
Examples:
.. code-block:: python

-import paddle
-
-m = paddle.distribution.Laplace(paddle.to_tensor([0.0]), paddle.to_tensor([1.0]))
-m.rsample((1,)) # Laplace distributed with loc=0, scale=1
-# Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
-# [[0.04337667]])
+>>> import paddle
+>>> m = paddle.distribution.Laplace(paddle.to_tensor([0.0]), paddle.to_tensor([1.0]))
+>>> m.rsample((1,)) # Laplace distributed with loc=0, scale=1
+Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
+[[0.04337667]])
Contributor: Add a seed~

Member Author: Done

"""

eps = self._get_eps()
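Worth noting while `sample` and `rsample` sit side by side here: `sample()` draws inside `paddle.no_grad()` (see the context lines above), while `rsample()` returns reparameterized samples, which stay differentiable with respect to the distribution's parameters under the usual pathwise-gradient semantics. A minimal sketch of that difference (the seed is only for reproducibility):

    >>> import paddle
    >>> paddle.seed(2023)
    >>> loc = paddle.to_tensor([0.0], stop_gradient=False)
    >>> m = paddle.distribution.Laplace(loc, paddle.to_tensor([1.0]))
    >>> s = m.rsample((1,))
    >>> s.sum().backward()            # gradients flow back to loc through the sample
    >>> print(loc.grad is not None)   # a sample() result would not carry this graph
    True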
@@ -395,13 +392,13 @@ def kl_divergence(self, other):
Examples:
.. code-block:: python

-import paddle
+>>> import paddle

-m1 = paddle.distribution.Laplace(paddle.to_tensor([0.0]), paddle.to_tensor([1.0]))
-m2 = paddle.distribution.Laplace(paddle.to_tensor([1.0]), paddle.to_tensor([0.5]))
-m1.kl_divergence(m2)
-# Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
-# [1.04261160])
+>>> m1 = paddle.distribution.Laplace(paddle.to_tensor([0.0]), paddle.to_tensor([1.0]))
+>>> m2 = paddle.distribution.Laplace(paddle.to_tensor([1.0]), paddle.to_tensor([0.5]))
+>>> m1.kl_divergence(m2)
+Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
+[1.04261160])
"""

var_ratio = other.scale / self.scale
60 changes: 30 additions & 30 deletions python/paddle/distribution/lognormal.py
@@ -49,36 +49,36 @@ class LogNormal(TransformedDistribution):
Examples:
.. code-block:: python

-import paddle
-from paddle.distribution import LogNormal
-
-# Define a single scalar LogNormal distribution.
-dist = LogNormal(loc=0., scale=3.)
-# Define a batch of two scalar valued LogNormals.
-# The underlying Normal of first has mean 1 and standard deviation 11, the underlying Normal of second 2 and 22.
-dist = LogNormal(loc=[1., 2.], scale=[11., 22.])
-# Get 3 samples, returning a 3 x 2 tensor.
-dist.sample((3, ))
-
-# Define a batch of two scalar valued LogNormals.
-# Their underlying Normal have mean 1, but different standard deviations.
-dist = LogNormal(loc=1., scale=[11., 22.])
-
-# Complete example
-value_tensor = paddle.to_tensor([0.8], dtype="float32")
-
-lognormal_a = LogNormal([0.], [1.])
-lognormal_b = LogNormal([0.5], [2.])
-sample = lognormal_a.sample((2, ))
-# a random tensor created by lognormal distribution with shape: [2, 1]
-entropy = lognormal_a.entropy()
-# [1.4189385] with shape: [1]
-lp = lognormal_a.log_prob(value_tensor)
-# [-0.72069150] with shape: [1]
-p = lognormal_a.probs(value_tensor)
-# [0.48641577] with shape: [1]
-kl = lognormal_a.kl_divergence(lognormal_b)
-# [0.34939718] with shape: [1]
+>>> import paddle
+>>> from paddle.distribution import LogNormal
+
+>>> # Define a single scalar LogNormal distribution.
+>>> dist = LogNormal(loc=0., scale=3.)
+>>> # Define a batch of two scalar valued LogNormals.
+>>> # The underlying Normal of first has mean 1 and standard deviation 11, the underlying Normal of second 2 and 22.
+>>> dist = LogNormal(loc=[1., 2.], scale=[11., 22.])
+>>> # Get 3 samples, returning a 3 x 2 tensor.
+>>> dist.sample((3, ))
+
+>>> # Define a batch of two scalar valued LogNormals.
+>>> # Their underlying Normal have mean 1, but different standard deviations.
+>>> dist = LogNormal(loc=1., scale=[11., 22.])
+
+>>> # Complete example
+>>> value_tensor = paddle.to_tensor([0.8], dtype="float32")
+
+>>> lognormal_a = LogNormal([0.], [1.])
+>>> lognormal_b = LogNormal([0.5], [2.])
+>>> sample = lognormal_a.sample((2, ))
+>>> # a random tensor created by lognormal distribution with shape: [2, 1]
+>>> entropy = lognormal_a.entropy()
+>>> # [1.4189385] with shape: [1]
+>>> lp = lognormal_a.log_prob(value_tensor)
+>>> # [-0.72069150] with shape: [1]
+>>> p = lognormal_a.probs(value_tensor)
+>>> # [0.48641577] with shape: [1]
+>>> kl = lognormal_a.kl_divergence(lognormal_b)
+>>> # [0.34939718] with shape: [1]
Contributor: In these 4 places, replace the outputs with something like print(entropy)~

    >>> entropy = lognormal_a.entropy()
    >>> print(entropy)
    Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
    [1.41893852])

Member Author: Done

Member: I don't see the update here either?
"""

def __init__(self, loc, scale):
24 changes: 12 additions & 12 deletions python/paddle/distribution/multinomial.py
@@ -53,18 +53,18 @@ class Multinomial(distribution.Distribution):

.. code-block:: python

-import paddle
-
-multinomial = paddle.distribution.Multinomial(10, paddle.to_tensor([0.2, 0.3, 0.5]))
-print(multinomial.sample((2, 3)))
-# Tensor(shape=[2, 3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
-# [[[1., 4., 5.],
-# [0., 2., 8.],
-# [2., 4., 4.]],
-
-# [[1., 6., 3.],
-# [3., 3., 4.],
-# [3., 4., 3.]]])
+>>> import paddle
+
+>>> multinomial = paddle.distribution.Multinomial(10, paddle.to_tensor([0.2, 0.3, 0.5]))
+>>> print(multinomial.sample((2, 3)))
+Tensor(shape=[2, 3, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+[[[1., 4., 5.],
+[0., 2., 8.],
+[2., 4., 4.]],
+
+[[1., 6., 3.],
+[3., 3., 4.],
+[3., 4., 3.]]])
Contributor: Don't leave blank lines in the output~ Also, add a seed~

Member Author: Done

Member Author: The spaces should be kept here, though, right?

    Python 3.10.8 (v3.10.8:aaaf517424, Oct 11 2022, 10:14:40) [Clang 13.0.0 (clang-1300.0.29.30)] on darwin
    Type "help", "copyright", "credits" or "license" for more information.
    >>> import paddle
    >>> paddle.seed(2023)
    <paddle.fluid.libpaddle.Generator object at 0x1070bb270>
    >>> multinomial = paddle.distribution.Multinomial(10, paddle.to_tensor([0.2, 0.3, 0.5]))
    >>> print(multinomial.sample((2, 3)))
    Tensor(shape=[2, 3, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
           [[[1., 5., 4.],
             [0., 4., 6.],
             [1., 3., 6.]],

            [[2., 2., 6.],
             [0., 6., 4.],
             [3., 3., 4.]]])

Contributor: Keeping spaces is fine, whatever looks good~ But don't leave blank lines~
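Background for the blank-line rule: in doctest syntax a truly blank line terminates the expected-output block, and a literal blank line inside output has to be written as `<BLANKLINE>`; keeping the expected tensor repr free of blank lines avoids the issue entirely. The standard-library convention, for illustration:

    >>> print("first\n\nsecond")
    first
    <BLANKLINE>
    second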

Member: Suggested change (drop the blank line inside the expected output):

    -       [[[1., 4., 5.],
    -         [0., 2., 8.],
    -         [2., 4., 4.]],
    -
    -        [[1., 6., 3.],
    -         [3., 3., 4.],
    -         [3., 4., 3.]]])
    +       [[[1., 4., 5.],
    +         [0., 2., 8.],
    +         [2., 4., 4.]],
    +        [[1., 6., 3.],
    +         [3., 3., 4.],
    +         [3., 4., 3.]]])

"""

def __init__(self, total_count, probs):
60 changes: 30 additions & 30 deletions python/paddle/distribution/normal.py
@@ -54,36 +54,36 @@ class Normal(distribution.Distribution):
Examples:
.. code-block:: python

-import paddle
-from paddle.distribution import Normal
-
-# Define a single scalar Normal distribution.
-dist = Normal(loc=0., scale=3.)
-# Define a batch of two scalar valued Normals.
-# The first has mean 1 and standard deviation 11, the second 2 and 22.
-dist = Normal(loc=[1., 2.], scale=[11., 22.])
-# Get 3 samples, returning a 3 x 2 tensor.
-dist.sample([3])
-
-# Define a batch of two scalar valued Normals.
-# Both have mean 1, but different standard deviations.
-dist = Normal(loc=1., scale=[11., 22.])
-
-# Complete example
-value_tensor = paddle.to_tensor([0.8], dtype="float32")
-
-normal_a = Normal([0.], [1.])
-normal_b = Normal([0.5], [2.])
-sample = normal_a.sample([2])
-# a random tensor created by normal distribution with shape: [2, 1]
-entropy = normal_a.entropy()
-# [1.4189385] with shape: [1]
-lp = normal_a.log_prob(value_tensor)
-# [-1.2389386] with shape: [1]
-p = normal_a.probs(value_tensor)
-# [0.28969154] with shape: [1]
-kl = normal_a.kl_divergence(normal_b)
-# [0.34939718] with shape: [1]
+>>> import paddle
+>>> from paddle.distribution import Normal
+
+>>> # Define a single scalar Normal distribution.
+>>> dist = Normal(loc=0., scale=3.)
+>>> # Define a batch of two scalar valued Normals.
+>>> # The first has mean 1 and standard deviation 11, the second 2 and 22.
+>>> dist = Normal(loc=[1., 2.], scale=[11., 22.])
+>>> # Get 3 samples, returning a 3 x 2 tensor.
+>>> dist.sample([3])
+
+>>> # Define a batch of two scalar valued Normals.
+>>> # Both have mean 1, but different standard deviations.
+>>> dist = Normal(loc=1., scale=[11., 22.])
+
+>>> # Complete example
+>>> value_tensor = paddle.to_tensor([0.8], dtype="float32")
+
+>>> normal_a = Normal([0.], [1.])
+>>> normal_b = Normal([0.5], [2.])
+>>> sample = normal_a.sample([2])
+>>> # a random tensor created by normal distribution with shape: [2, 1]
+>>> entropy = normal_a.entropy()
+>>> # [1.4189385] with shape: [1]
+>>> lp = normal_a.log_prob(value_tensor)
+>>> # [-1.2389386] with shape: [1]
+>>> p = normal_a.probs(value_tensor)
+>>> # [0.28969154] with shape: [1]
+>>> kl = normal_a.kl_divergence(normal_b)
+>>> # [0.34939718] with shape: [1]
Contributor: Same as above, print the outputs~

Member Author: Done

Member: Was print added here?

"""

def __init__(self, loc, scale, name=None):