fixed dependencies

This commit is contained in:
nuknal
2024-10-24 15:46:01 +08:00
parent d16a5bd9c0
commit 1161e8d054
2005 changed files with 690883 additions and 0 deletions

6
vendor/gonum.org/v1/gonum/stat/README.md generated vendored Normal file
View File

@@ -0,0 +1,6 @@
# Gonum stat
[![go.dev reference](https://pkg.go.dev/badge/gonum.org/v1/gonum/stat)](https://pkg.go.dev/gonum.org/v1/gonum/stat)
[![GoDoc](https://godocs.io/gonum.org/v1/gonum/stat?status.svg)](https://godocs.io/gonum.org/v1/gonum/stat)
Package stat is a statistics package for the Go language.

683
vendor/gonum.org/v1/gonum/stat/combin/combin.go generated vendored Normal file
View File

@@ -0,0 +1,683 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package combin
import (
"math"
"sort"
)
// Panic messages shared by the input-validation paths in this package.
const (
	errNegInput             = "combin: negative input"          // n or k is negative
	badSetSize              = "combin: n < k"                   // fewer items than selections
	badInput                = "combin: wrong input slice length" // dst/sub has the wrong length
	errNonpositiveDimension = "combin: non-positive dimension"  // a dimension size is <= 0
)
// Binomial returns the binomial coefficient of (n,k), also commonly referred to
// as "n choose k".
//
// The binomial coefficient, C(n,k), is the number of unordered combinations of
// k elements in a set that is n elements big, and is defined as
//
//	C(n,k) = n!/((n-k)!k!)
//
// n and k must be non-negative with n >= k, otherwise Binomial will panic.
// No check is made for overflow.
func Binomial(n, k int) int {
	switch {
	case n < 0 || k < 0:
		panic(errNegInput)
	case n < k:
		panic(badSetSize)
	}
	// Exploit the symmetry C(n,k) == C(n,n-k) to keep the loop short.
	if n-k < k {
		k = n - k
	}
	result := 1
	for j := 1; j <= k; j++ {
		// Multiply before dividing: the running product of j consecutive
		// integers is always divisible by j!, so the division is exact.
		result = result * (n - k + j) / j
	}
	return result
}
// GeneralizedBinomial returns the generalized binomial coefficient of (n, k),
// defined as
//
//	Γ(n+1) / (Γ(k+1) Γ(n-k+1))
//
// where Γ is the Gamma function. GeneralizedBinomial is useful for continuous
// relaxations of the binomial coefficient, or when the binomial coefficient value
// may overflow int. In the latter case, one may use math/big for an exact
// computation.
//
// n and k must be non-negative with n >= k, otherwise GeneralizedBinomial will panic.
func GeneralizedBinomial(n, k float64) float64 {
	logCoeff := LogGeneralizedBinomial(n, k)
	return math.Exp(logCoeff)
}
// LogGeneralizedBinomial returns the log of the generalized binomial coefficient.
// See GeneralizedBinomial for more information.
func LogGeneralizedBinomial(n, k float64) float64 {
	switch {
	case n < 0 || k < 0:
		panic(errNegInput)
	case n < k:
		panic(badSetSize)
	}
	// lnΓ(n+1) - lnΓ(k+1) - lnΓ(n-k+1); the sign results from Lgamma are
	// irrelevant because all arguments are >= 1 here.
	num, _ := math.Lgamma(n + 1)
	den1, _ := math.Lgamma(k + 1)
	den2, _ := math.Lgamma(n - k + 1)
	return num - den1 - den2
}
// CombinationGenerator generates combinations iteratively. The Combinations
// function may be called to generate all combinations collectively.
type CombinationGenerator struct {
	n         int   // size of the set drawn from
	k         int   // number of elements per combination
	previous  []int // last combination produced; nil until the first Next
	remaining int   // combinations not yet produced; -1 once exhausted
}
// NewCombinationGenerator returns a CombinationGenerator for generating the
// combinations of k elements from a set of size n.
//
// n and k must be non-negative with n >= k, otherwise NewCombinationGenerator
// will panic.
func NewCombinationGenerator(n, k int) *CombinationGenerator {
	// Binomial performs the argument validation and panics on bad input.
	gen := &CombinationGenerator{n: n, k: k}
	gen.remaining = Binomial(n, k)
	return gen
}
// Next advances the iterator if there are combinations remaining to be generated,
// and returns false if all combinations have been generated. Next must be called
// to initialize the first value before calling Combination or Combination will
// panic. The value returned by Combination is only changed during calls to Next.
func (c *CombinationGenerator) Next() bool {
	if c.remaining <= 0 {
		// remaining reaches zero before Combination is ever blocked, so a
		// distinct sentinel (-1) is needed for Combination to detect
		// exhaustion and panic.
		c.remaining = -1
		return false
	}
	if c.previous != nil {
		nextCombination(c.previous, c.n, c.k)
	} else {
		// First call: start from the identity combination {0, 1, ..., k-1}.
		c.previous = make([]int, c.k)
		for i := 0; i < c.k; i++ {
			c.previous[i] = i
		}
	}
	c.remaining--
	return true
}
// Combination returns the current combination. If dst is non-nil, it must have
// length k and the result will be stored in-place into dst. If dst
// is nil a new slice will be allocated and returned. If all of the combinations
// have already been constructed (Next() returns false), Combination will panic.
//
// Next must be called to initialize the first value before calling Combination
// or Combination will panic. The value returned by Combination is only changed
// during calls to Next.
func (c *CombinationGenerator) Combination(dst []int) []int {
	switch {
	case c.remaining == -1:
		panic("combin: all combinations have been generated")
	case c.previous == nil:
		panic("combin: Combination called before Next")
	}
	if dst == nil {
		dst = make([]int, c.k)
	} else if len(dst) != c.k {
		panic(badInput)
	}
	copy(dst, c.previous)
	return dst
}
// Combinations generates all of the combinations of k elements from a
// set of size n. The returned slice has length Binomial(n,k) and each inner slice
// has length k.
//
// n and k must be non-negative with n >= k, otherwise Combinations will panic.
//
// CombinationGenerator may alternatively be used to generate the combinations
// iteratively instead of collectively, or IndexToCombination for random access.
func Combinations(n, k int) [][]int {
	total := Binomial(n, k)
	list := make([][]int, total)
	if total == 0 {
		return list
	}
	// Seed with the identity combination, then advance repeatedly.
	first := make([]int, k)
	for i := 0; i < k; i++ {
		first[i] = i
	}
	list[0] = first
	for i := 1; i < total; i++ {
		cur := append([]int(nil), list[i-1]...)
		nextCombination(cur, n, k)
		list[i] = cur
	}
	return list
}
// nextCombination advances s to the combination that follows it in
// lexicographic order, overwriting s in place.
func nextCombination(s []int, n, k int) {
	for pos := k - 1; pos >= 0; pos-- {
		// Position pos is saturated when it holds its maximum value n-k+pos;
		// skip past saturated positions to find the rightmost incrementable one.
		if s[pos] == n+pos-k {
			continue
		}
		s[pos]++
		// Reset everything to the right to the smallest valid ascending run.
		for t := pos + 1; t < k; t++ {
			s[t] = s[pos] + t - pos
		}
		return
	}
}
// CombinationIndex returns the index of the given combination.
//
// The functions CombinationIndex and IndexToCombination define a bijection
// between the integers and the Binomial(n, k) number of possible combinations.
// CombinationIndex returns the inverse of IndexToCombination.
//
// CombinationIndex panics if comb is not a sorted combination of the first
// [0,n) integers, if n or k are negative, or if k is greater than n.
func CombinationIndex(comb []int, n, k int) int {
	switch {
	case n < 0 || k < 0:
		panic(errNegInput)
	case n < k:
		panic(badSetSize)
	case len(comb) != k:
		panic("combin: bad length combination")
	}
	if !sort.IntsAreSorted(comb) {
		panic("combin: input combination is not sorted")
	}
	seen := make(map[int]struct{}, k)
	for _, e := range comb {
		seen[e] = struct{}{}
	}
	if len(seen) != k {
		panic("combin: comb contains non-unique elements")
	}
	// The combinatorial number system below ranks combinations in reverse
	// lexicographic order, so mirror both positions and values first.
	mirrored := make([]int, k)
	for pos, e := range comb {
		mirrored[k-pos-1] = n - e - 1
	}
	var rank int
	for pos, e := range mirrored {
		if e > pos {
			rank += Binomial(e, pos+1)
		}
	}
	return Binomial(n, k) - 1 - rank
}
// IndexToCombination returns the combination corresponding to the given index.
//
// The functions CombinationIndex and IndexToCombination define a bijection
// between the integers and the Binomial(n, k) number of possible combinations.
// IndexToCombination returns the inverse of CombinationIndex (up to the order
// of the elements).
//
// The combination is stored in-place into dst if dst is non-nil, otherwise
// a new slice is allocated and returned.
//
// IndexToCombination panics if n or k are negative, if k is greater than n,
// or if idx is not in [0, Binomial(n,k)-1]. IndexToCombination will also panic
// if dst is non-nil and len(dst) is not k.
func IndexToCombination(dst []int, idx, n, k int) []int {
	// Binomial validates n and k and panics on bad input.
	if idx < 0 || idx >= Binomial(n, k) {
		panic("combin: invalid index")
	}
	if dst == nil {
		dst = make([]int, k)
	} else if len(dst) != k {
		panic(badInput)
	}
	// The base algorithm (the combinatorial number system) indexes in reverse
	// lexicographic order; flip the index so the output is lexicographic.
	idx = Binomial(n, k) - 1 - idx
	for i := range dst {
		// Find the largest number m such that Binomial(m, k-i) <= idx.
		// sort.Search finds the first m for which the predicate holds, i.e.
		// the first m with Binomial(m, k-i) > idx, so m-1 is the wanted value.
		// The m < k-i guard keeps Binomial from panicking on n < k.
		m := sort.Search(n, func(m int) bool {
			if m < k-i {
				return false
			}
			return Binomial(m, k-i) > idx
		})
		m--
		// Normally m would be placed into the last free spot; mirroring the
		// value (n - m - 1) undoes the reverse-lexicographic ordering.
		dst[i] = n - m - 1
		if m >= k-i {
			idx -= Binomial(m, k-i)
		}
	}
	return dst
}
// Cartesian returns the Cartesian product of the slices in data. The Cartesian
// product of two sets is the set of all combinations of the items. For example,
// given the input
//
//	[]int{2, 3, 1}
//
// the returned matrix will be
//
//	[ 0 0 0 ]
//	[ 0 1 0 ]
//	[ 0 2 0 ]
//	[ 1 0 0 ]
//	[ 1 1 0 ]
//	[ 1 2 0 ]
//
// Cartesian panics if any of the provided lengths are less than 1.
func Cartesian(lens []int) [][]int {
	total := Card(lens)
	if total == 0 {
		panic("combin: empty lengths")
	}
	product := make([][]int, total)
	for row := range product {
		product[row] = SubFor(nil, row, lens)
	}
	return product
}
// Card computes the cardinality of the multi-dimensional space whose dimensions
// have size specified by dims. All length values must be non-negative,
// otherwise Card panics. An empty dims yields a cardinality of zero.
func Card(dims []int) int {
	if len(dims) == 0 {
		return 0
	}
	total := 1
	for _, size := range dims {
		if size < 0 {
			panic("combin: length less than zero")
		}
		total *= size
	}
	return total
}
// NewCartesianGenerator returns a CartesianGenerator for iterating over
// Cartesian products which are generated on the fly.
// All values in lens must be positive, otherwise this will panic.
func NewCartesianGenerator(lens []int) *CartesianGenerator {
	gen := &CartesianGenerator{lens: lens, idx: -1}
	gen.rows = Card(lens)
	return gen
}
// CartesianGenerator iterates over a Cartesian product set.
type CartesianGenerator struct {
	lens []int // size of each dimension
	rows int   // total number of products, Card(lens)
	idx  int   // current linear index; -1 before the first Next, rows when done
}
// Next moves to the next product of the Cartesian set.
// It returns false once the generator has passed the end of the set.
func (g *CartesianGenerator) Next() bool {
	next := g.idx + 1
	if next >= g.rows {
		// Park the index one past the end so Product panics via SubFor.
		g.idx = g.rows
		return false
	}
	g.idx = next
	return true
}
// Product generates one product of the Cartesian set according to the current
// index, which is advanced by Next. If dst is non-nil the result is stored
// in-place into dst (it must have length len(g.lens)); otherwise a new slice
// is allocated. Next needs to be called at least one time before this method,
// otherwise it will panic.
func (g *CartesianGenerator) Product(dst []int) []int {
	return SubFor(dst, g.idx, g.lens)
}
// IdxFor converts a multi-dimensional index into a linear index for a
// multi-dimensional space. sub specifies the index for each dimension, and dims
// specifies the size of each dimension. IdxFor is the inverse of SubFor.
// IdxFor panics if any of the entries of sub are negative, any of the entries
// of dim are non-positive, or if sub[i] >= dims[i] for any i.
func IdxFor(sub, dims []int) int {
	// The index is "row-major": the last axis of sub varies fastest.
	idx, stride := 0, 1
	for axis := len(dims) - 1; axis >= 0; axis-- {
		size := dims[axis]
		if size <= 0 {
			panic(errNonpositiveDimension)
		}
		coord := sub[axis]
		if coord < 0 || coord >= size {
			panic("combin: invalid subscript")
		}
		idx += coord * stride
		stride *= size
	}
	return idx
}
// SubFor returns the multi-dimensional subscript for the input linear index to
// the multi-dimensional space. dims specifies the size of each dimension, and
// idx specifies the linear index. SubFor is the inverse of IdxFor.
//
// If sub is non-nil the result is stored in-place into sub, and SubFor will panic
// if len(sub) != len(dims). If sub is nil a new slice of the appropriate length
// is allocated. SubFor panics if idx < 0, if idx is greater than or equal to
// the product of the dimensions, or if any dimension is non-positive.
func SubFor(sub []int, idx int, dims []int) []int {
	if sub == nil {
		sub = make([]int, len(dims))
	}
	if len(sub) != len(dims) {
		panic(badInput)
	}
	if idx < 0 {
		panic(errNegInput)
	}
	stride := 1
	for i := len(dims) - 1; i >= 1; i-- {
		stride *= dims[i]
	}
	for i := 0; i < len(dims)-1; i++ {
		v := idx / stride
		d := dims[i]
		// Match IdxFor: zero-sized dimensions are invalid too, not only
		// negative ones. (Previously d == 0 slipped through and later caused
		// a confusing divide-by-zero on the stride update.)
		if d <= 0 {
			panic(errNonpositiveDimension)
		}
		if v >= d {
			panic("combin: index too large")
		}
		sub[i] = v
		idx -= v * stride
		stride /= dims[i+1]
	}
	// The remaining index must be a valid subscript for the final dimension.
	// The previous check used > (off-by-one), which accepted
	// idx == dims[last] and returned an out-of-range subscript, e.g.
	// SubFor(nil, 3, []int{3}) returned [3] instead of panicking.
	if idx >= dims[len(sub)-1] {
		panic("combin: index too large")
	}
	sub[len(sub)-1] = idx
	return sub
}
// NumPermutations returns the number of permutations when selecting k
// objects from a set of n objects when the selection order matters.
// No check is made for overflow.
//
// NumPermutations panics if either n or k is negative, or if k is
// greater than n.
func NumPermutations(n, k int) int {
	switch {
	case n < 0:
		panic("combin: n is negative")
	case k < 0:
		panic("combin: k is negative")
	case k > n:
		panic("combin: k is greater than n")
	}
	// n * (n-1) * ... * (n-k+1), the falling factorial.
	count := 1
	for v := n; v > n-k; v-- {
		count *= v
	}
	return count
}
// Permutations generates all of the permutations of k elements from a
// set of size n. The returned slice has length NumPermutations(n, k)
// and each inner slice has length k.
//
// n and k must be non-negative with n >= k, otherwise Permutations will panic.
//
// PermutationGenerator may alternatively be used to generate the permutations
// iteratively instead of collectively, or IndexToPermutation for random access.
func Permutations(n, k int) [][]int {
	total := NumPermutations(n, k)
	perms := make([][]int, total)
	for i := range perms {
		perms[i] = IndexToPermutation(nil, i, n, k)
	}
	return perms
}
// PermutationGenerator generates permutations iteratively. The Permutations
// function may be called to generate all permutations collectively.
type PermutationGenerator struct {
	n           int   // size of the set drawn from
	k           int   // number of elements per permutation
	nPerm       int   // total number of permutations, NumPermutations(n, k)
	idx         int   // index of the current permutation; -1 before the first Next
	permutation []int // scratch buffer holding the current permutation
}
// NewPermutationGenerator returns a PermutationGenerator for generating the
// permutations of k elements from a set of size n.
//
// n and k must be non-negative with n >= k, otherwise NewPermutationGenerator
// will panic.
func NewPermutationGenerator(n, k int) *PermutationGenerator {
	// NumPermutations validates n and k and panics on bad input.
	gen := &PermutationGenerator{
		n:           n,
		k:           k,
		idx:         -1,
		permutation: make([]int, k),
	}
	gen.nPerm = NumPermutations(n, k)
	return gen
}
// Next advances the iterator if there are permutations remaining to be generated,
// and returns false if all permutations have been generated. Next must be called
// to initialize the first value before calling Permutation or Permutation will
// panic. The value returned by Permutation is only changed during calls to Next.
func (p *PermutationGenerator) Next() bool {
	if p.idx+1 >= p.nPerm {
		// Park the index one past the end so Permutation can panic.
		p.idx = p.nPerm
		return false
	}
	p.idx++
	IndexToPermutation(p.permutation, p.idx, p.n, p.k)
	return true
}
// Permutation returns the current permutation. If dst is non-nil, it must have
// length k and the result will be stored in-place into dst. If dst
// is nil a new slice will be allocated and returned. If all of the permutations
// have already been constructed (Next() returns false), Permutation will panic.
//
// Next must be called to initialize the first value before calling Permutation
// or Permutation will panic. The value returned by Permutation is only changed
// during calls to Next.
func (p *PermutationGenerator) Permutation(dst []int) []int {
	switch p.idx {
	case p.nPerm:
		panic("combin: all permutations have been generated")
	case -1:
		panic("combin: Permutation called before Next")
	}
	if dst == nil {
		dst = make([]int, p.k)
	} else if len(dst) != p.k {
		panic(badInput)
	}
	copy(dst, p.permutation)
	return dst
}
// PermutationIndex returns the index of the given permutation.
//
// The functions PermutationIndex and IndexToPermutation define a bijection
// between the integers and the NumPermutations(n, k) number of possible permutations.
// PermutationIndex returns the inverse of IndexToPermutation.
//
// PermutationIndex panics if perm is not a permutation of k of the first
// [0,n) integers, if n or k are negative, or if k is greater than n.
func PermutationIndex(perm []int, n, k int) int {
	if n < 0 || k < 0 {
		panic(errNegInput)
	}
	if n < k {
		panic(badSetSize)
	}
	if len(perm) != k {
		panic("combin: bad length permutation")
	}
	// Validate that perm is a duplicate-free selection from [0, n).
	contains := make(map[int]struct{}, k)
	for _, v := range perm {
		if v < 0 || v >= n {
			panic("combin: bad element")
		}
		contains[v] = struct{}{}
	}
	if len(contains) != k {
		panic("combin: perm contains non-unique elements")
	}
	if n == k {
		// The permutation is the ordering of the elements.
		return equalPermutationIndex(perm)
	}
	// The permutation index is found by finding the combination index and the
	// equalPermutation index. The combination index is found by just sorting
	// the elements, and the permutation index is the ordering of the size
	// of the elements.
	tmp := make([]int, len(perm))
	copy(tmp, perm)
	idx := make([]int, len(perm))
	for i := range idx {
		idx[i] = i
	}
	// Sort a copy of perm while recording, via idx, where each element moved;
	// order then holds the rank of each original element.
	s := sortInts{tmp, idx}
	sort.Sort(s)
	order := make([]int, len(perm))
	for i, v := range idx {
		order[v] = i
	}
	// Combine: each combination contributes k! orderings (see IndexToPermutation).
	combIdx := CombinationIndex(tmp, n, k)
	permIdx := equalPermutationIndex(order)
	return combIdx*NumPermutations(k, k) + permIdx
}
type sortInts struct {
data []int
idx []int
}
func (s sortInts) Len() int {
return len(s.data)
}
func (s sortInts) Less(i, j int) bool {
return s.data[i] < s.data[j]
}
func (s sortInts) Swap(i, j int) {
s.data[i], s.data[j] = s.data[j], s.data[i]
s.idx[i], s.idx[j] = s.idx[j], s.idx[i]
}
// IndexToPermutation returns the permutation corresponding to the given index.
//
// The functions PermutationIndex and IndexToPermutation define a bijection
// between the integers and the NumPermutations(n, k) number of possible permutations.
// IndexToPermutation returns the inverse of PermutationIndex.
//
// The permutation is stored in-place into dst if dst is non-nil, otherwise
// a new slice is allocated and returned.
//
// IndexToPermutation panics if n or k are negative, if k is greater than n,
// or if idx is not in [0, NumPermutations(n,k)-1]. IndexToPermutation will also panic
// if dst is non-nil and len(dst) is not k.
func IndexToPermutation(dst []int, idx, n, k int) []int {
	// NumPermutations validates n and k and panics on bad input.
	nPerm := NumPermutations(n, k)
	if idx < 0 || idx >= nPerm {
		panic("combin: invalid index")
	}
	if dst == nil {
		dst = make([]int, k)
	} else if len(dst) != k {
		panic(badInput)
	}
	if n == k {
		// Selecting all n elements: the permutation is purely an ordering.
		indexToEqualPermutation(dst, idx)
		return dst
	}
	// First, we index into the combination (which of the k items to choose)
	// and then we index into the n == k permutation of those k items. The
	// indexing acts like a matrix with nComb rows and factorial(k) columns.
	kPerm := NumPermutations(k, k)
	combIdx := idx / kPerm
	permIdx := idx % kPerm
	comb := IndexToCombination(nil, combIdx, n, k) // Gives us the set of integers.
	perm := make([]int, len(dst))
	indexToEqualPermutation(perm, permIdx) // Gives their order.
	for i, v := range perm {
		dst[i] = comb[v]
	}
	return dst
}
// equalPermutationIndex returns the lexicographic rank of perm, a permutation
// of the first len(perm) non-negative integers.
func equalPermutationIndex(perm []int) int {
	// Note(btracey): This is an n^2 algorithm, but factorial increases
	// very quickly (25! overflows int64) so this is not a problem in
	// practice.
	rank := 0
	for i := len(perm) - 1; i >= 0; i-- {
		// Count elements to the right of position i that are smaller than
		// perm[i]; each accounts for (len-i-1)! earlier permutations.
		smaller := 0
		for j := i + 1; j < len(perm); j++ {
			if perm[j] < perm[i] {
				smaller++
			}
		}
		rank += smaller * factorial(len(perm)-i-1)
	}
	return rank
}
// indexToEqualPermutation stores into dst the permutation of the first
// len(dst) non-negative integers that has lexicographic rank idx.
func indexToEqualPermutation(dst []int, idx int) {
	// Start from the identity permutation.
	for i := range dst {
		dst[i] = i
	}
	// Factorial number system: at each position, the quotient by
	// (remaining-1)! selects which of the remaining elements comes next.
	for i := range dst {
		f := factorial(len(dst) - i - 1)
		r := idx / f
		// Extract the r-th remaining element and rotate the skipped-over
		// elements one slot right to keep the tail sorted. The overlapping
		// copy behaves like memmove, shifting dst[i:i+r] into dst[i+1:i+r+1].
		v := dst[i+r]
		copy(dst[i+1:i+r+1], dst[i:i+r])
		dst[i] = v
		idx %= f
	}
}
// factorial returns a!. It returns 1 for any a < 2 and makes no overflow check.
func factorial(a int) int {
	result := 1
	for v := a; v > 1; v-- {
		result *= v
	}
	return result
}

7
vendor/gonum.org/v1/gonum/stat/combin/doc.go generated vendored Normal file
View File

@@ -0,0 +1,7 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package combin implements routines involving combinatorics (permutations,
// combinations, etc.).
package combin // import "gonum.org/v1/gonum/stat/combin"

150
vendor/gonum.org/v1/gonum/stat/distmv/dirichlet.go generated vendored Normal file
View File

@@ -0,0 +1,150 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distmv
import (
"math"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/floats"
"gonum.org/v1/gonum/mat"
"gonum.org/v1/gonum/stat/distuv"
)
// Dirichlet implements the Dirichlet probability distribution.
//
// The Dirichlet distribution is a continuous probability distribution that
// generates elements over the probability simplex, i.e. ||x||_1 = 1. The Dirichlet
// distribution is the conjugate prior to the categorical distribution and the
// multivariate version of the beta distribution. The probability of a point x is
//
//	1/Beta(α) \prod_i x_i^(α_i - 1)
//
// where Beta(α) is the multivariate Beta function (see the mathext package).
//
// For more information see https://en.wikipedia.org/wiki/Dirichlet_distribution
type Dirichlet struct {
	alpha []float64 // concentration parameters; all > 0
	dim   int       // dimension, len(alpha)
	src   rand.Source

	lbeta    float64 // cached log of the multivariate Beta function of alpha
	sumAlpha float64 // cached sum of the alpha values
}
// NewDirichlet creates a new dirichlet distribution with the given parameters alpha.
// NewDirichlet will panic if len(alpha) == 0, or if any alpha is <= 0.
func NewDirichlet(alpha []float64, src rand.Source) *Dirichlet {
	if len(alpha) == 0 {
		panic(badZeroDimension)
	}
	for _, a := range alpha {
		if a <= 0 {
			panic("dirichlet: non-positive alpha")
		}
	}
	// Copy alpha so later mutation by the caller cannot corrupt the distribution.
	d := &Dirichlet{
		alpha: append([]float64(nil), alpha...),
		dim:   len(alpha),
		src:   src,
	}
	d.lbeta, d.sumAlpha = d.genLBeta(d.alpha)
	return d
}
// CovarianceMatrix calculates the covariance matrix of the distribution,
// storing the result in dst. Upon return, the value at element {i, j} of the
// covariance matrix is equal to the covariance of the i^th and j^th variables.
//
//	covariance(i, j) = E[(x_i - E[x_i])(x_j - E[x_j])]
//
// If the dst matrix is empty it will be resized to the correct dimensions,
// otherwise dst must match the dimension of the receiver or CovarianceMatrix
// will panic.
func (d *Dirichlet) CovarianceMatrix(dst *mat.SymDense) {
	if dst.IsEmpty() {
		*dst = *(dst.GrowSym(d.dim).(*mat.SymDense))
	} else if dst.SymmetricDim() != d.dim {
		// Fixed typo in the panic message ("dirichelet" -> "dirichlet").
		panic("dirichlet: input matrix size mismatch")
	}
	// Var(x_i)    =  α_i (α_0 - α_i) / (α_0^2 (α_0 + 1))
	// Cov(x_i,x_j) = -α_i α_j         / (α_0^2 (α_0 + 1)), where α_0 = Σ α.
	scale := 1 / (d.sumAlpha * d.sumAlpha * (d.sumAlpha + 1))
	for i := 0; i < d.dim; i++ {
		ai := d.alpha[i]
		v := ai * (d.sumAlpha - ai) * scale
		dst.SetSym(i, i, v)
		for j := i + 1; j < d.dim; j++ {
			aj := d.alpha[j]
			v := -ai * aj * scale
			dst.SetSym(i, j, v)
		}
	}
}
// genLBeta computes the log of the multivariate Beta function of the given
// parameter vector, along with the sum of the parameters:
//
//	LBeta(α) = Σ_i lnΓ(α_i) - lnΓ(Σ_i α_i)
//
// Previously the alpha argument was ignored and d.alpha was iterated instead;
// the two are identical at the only call site, but ranging over the parameter
// makes the function honest about its inputs.
func (d *Dirichlet) genLBeta(alpha []float64) (lbeta, sumAlpha float64) {
	for _, a := range alpha {
		lg, _ := math.Lgamma(a)
		lbeta += lg
		sumAlpha += a
	}
	lg, _ := math.Lgamma(sumAlpha)
	return lbeta - lg, sumAlpha
}
// Dim returns the dimension of the distribution (the length of alpha).
func (d *Dirichlet) Dim() int {
	return d.dim
}
// LogProb computes the log of the pdf of the point x.
//
// It does not check that ||x||_1 = 1.
func (d *Dirichlet) LogProb(x []float64) float64 {
	if len(x) != d.dim {
		panic(badSizeMismatch)
	}
	// ln p(x) = Σ_i (α_i - 1) ln(x_i) - LBeta(α), with LBeta cached at
	// construction time.
	var acc float64
	for i, xi := range x {
		acc += (d.alpha[i] - 1) * math.Log(xi)
	}
	return acc - d.lbeta
}
// Mean returns the mean of the probability distribution.
//
// If dst is not nil, the mean will be stored in-place into dst and returned,
// otherwise a new slice will be allocated first. If dst is not nil, it must
// have length equal to the dimension of the distribution.
func (d *Dirichlet) Mean(dst []float64) []float64 {
	dst = reuseAs(dst, d.dim)
	// E[x_i] = α_i / Σ_j α_j.
	floats.ScaleTo(dst, 1/d.sumAlpha, d.alpha)
	return dst
}
// Prob computes the value of the probability density function at x by
// exponentiating LogProb.
func (d *Dirichlet) Prob(x []float64) float64 {
	return math.Exp(d.LogProb(x))
}
// Rand generates a random sample from the distribution.
//
// If dst is not nil, the sample will be stored in-place into dst and returned,
// otherwise a new slice will be allocated first. If dst is not nil, it must
// have length equal to the dimension of the distribution.
func (d *Dirichlet) Rand(dst []float64) []float64 {
	dst = reuseAs(dst, d.dim)
	// Standard construction: draw Gamma(α_i, 1) variates and normalize the
	// vector onto the simplex.
	for i, a := range d.alpha {
		g := distuv.Gamma{Alpha: a, Beta: 1, Src: d.src}
		dst[i] = g.Rand()
	}
	total := floats.Sum(dst)
	floats.Scale(1/total, dst)
	return dst
}

28
vendor/gonum.org/v1/gonum/stat/distmv/distmv.go generated vendored Normal file
View File

@@ -0,0 +1,28 @@
// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distmv
// Panic messages shared by the input-validation paths in this package.
const (
	badQuantile      = "distmv: quantile not between 0 and 1"
	badOutputLen     = "distmv: output slice is not nil or the correct length"
	badInputLength   = "distmv: input slice length mismatch"
	badSizeMismatch  = "distmv: size mismatch"
	badZeroDimension = "distmv: zero dimensional input"
	nonPosDimension  = "distmv: non-positive dimension input"
)

// logTwoPi is ln(2π), used in normal-distribution density and entropy formulas.
const logTwoPi = 1.8378770664093454835606594728112352797227949472755668
// reuseAs returns a slice of length n. If dst is nil a new slice is allocated;
// if dst already has length n it is returned unchanged; any other length panics.
func reuseAs(dst []float64, n int) []float64 {
	switch {
	case dst == nil:
		return make([]float64, n)
	case len(dst) == n:
		return dst
	default:
		panic(badOutputLen)
	}
}

6
vendor/gonum.org/v1/gonum/stat/distmv/doc.go generated vendored Normal file
View File

@@ -0,0 +1,6 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package distmv provides multivariate random distribution types.
package distmv // import "gonum.org/v1/gonum/stat/distmv"

33
vendor/gonum.org/v1/gonum/stat/distmv/interfaces.go generated vendored Normal file
View File

@@ -0,0 +1,33 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distmv
// Quantiler returns the multi-dimensional inverse cumulative distribution function.
// All of the values of p must be between 0 and 1, or Quantile will panic.
// If x is non-nil it must have length equal to len(p) and the quantile is
// stored in-place into x; if x is nil a new slice is allocated and returned.
type Quantiler interface {
	Quantile(x, p []float64) []float64
}

// LogProber computes the log of the probability of the point x.
type LogProber interface {
	LogProb(x []float64) float64
}

// Rander generates a random number according to the distribution.
// If x is non-nil it must have length equal to the dimension of the
// distribution, otherwise Rand will panic, and the sample is stored
// in-place into x. If x is nil, a new slice is allocated and returned.
type Rander interface {
	Rand(x []float64) []float64
}

// RandLogProber is both a Rander and a LogProber.
type RandLogProber interface {
	Rander
	LogProber
}

525
vendor/gonum.org/v1/gonum/stat/distmv/normal.go generated vendored Normal file
View File

@@ -0,0 +1,525 @@
// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distmv
import (
"math"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/floats"
"gonum.org/v1/gonum/mat"
"gonum.org/v1/gonum/stat"
"gonum.org/v1/gonum/stat/distuv"
)
// Normal is a multivariate normal distribution (also known as the multivariate
// Gaussian distribution). Its pdf in k dimensions is given by
//
//	(2 π)^(-k/2) |Σ|^(-1/2) exp(-1/2 (x-μ)'Σ^-1(x-μ))
//
// where μ is the mean vector and Σ the covariance matrix. Σ must be symmetric
// and positive definite. Use NewNormal to construct.
type Normal struct {
	mu []float64 // mean vector μ

	sigma mat.SymDense // covariance matrix Σ

	chol       mat.Cholesky // Cholesky decomposition of Σ
	logSqrtDet float64      // 0.5 * ln|Σ|, cached for density evaluation
	dim        int          // dimension, len(mu)

	// If src is altered, rnd must be updated.
	src rand.Source
	rnd *rand.Rand
}
// NewNormal creates a new Normal with the given mean and covariance matrix.
// NewNormal panics if len(mu) == 0, or if len(mu) != sigma.SymmetricDim(). If
// the covariance matrix is not positive-definite, the returned boolean is false.
func NewNormal(mu []float64, sigma mat.Symmetric, src rand.Source) (*Normal, bool) {
	if len(mu) == 0 {
		panic(badZeroDimension)
	}
	dim := sigma.SymmetricDim()
	if len(mu) != dim {
		panic(badSizeMismatch)
	}
	n := &Normal{
		src: src,
		rnd: rand.New(src),
		dim: dim,
		mu:  append([]float64(nil), mu...),
	}
	// A failed factorization means sigma is not positive definite.
	if ok := n.chol.Factorize(sigma); !ok {
		return nil, false
	}
	n.sigma = *mat.NewSymDense(dim, nil)
	n.sigma.CopySym(sigma)
	n.logSqrtDet = 0.5 * n.chol.LogDet()
	return n, true
}
// NewNormalChol creates a new Normal distribution with the given mean and
// covariance matrix represented by its Cholesky decomposition. NewNormalChol
// panics if len(mu) is not equal to chol.SymmetricDim().
func NewNormalChol(mu []float64, chol *mat.Cholesky, src rand.Source) *Normal {
	if len(mu) != chol.SymmetricDim() {
		panic(badSizeMismatch)
	}
	n := &Normal{
		src: src,
		rnd: rand.New(src),
		dim: len(mu),
		mu:  append([]float64(nil), mu...),
	}
	n.chol.Clone(chol)
	n.logSqrtDet = 0.5 * n.chol.LogDet()
	return n
}
// NewNormalPrecision creates a new Normal distribution with the given mean and
// precision matrix (inverse of the covariance matrix). NewNormalPrecision
// panics if len(mu) is not equal to prec.SymmetricDim(). If the precision matrix
// is not positive-definite, NewNormalPrecision returns nil for norm and false
// for ok.
func NewNormalPrecision(mu []float64, prec *mat.SymDense, src rand.Source) (norm *Normal, ok bool) {
	if len(mu) == 0 {
		panic(badZeroDimension)
	}
	dim := prec.SymmetricDim()
	if dim != len(mu) {
		panic(badSizeMismatch)
	}
	// TODO(btracey): Computing a matrix inverse is generally numerically unstable.
	// This only has to compute the inverse of a positive definite matrix, which
	// is much better, but this still loses precision. It is worth considering if
	// instead the precision matrix should be stored explicitly and used instead
	// of the Cholesky decomposition of the covariance matrix where appropriate.
	var chol mat.Cholesky
	ok = chol.Factorize(prec)
	if !ok {
		// prec is not positive definite.
		return nil, false
	}
	// Invert the precision matrix to recover the covariance matrix, then
	// delegate to NewNormal for construction.
	var sigma mat.SymDense
	err := chol.InverseTo(&sigma)
	if err != nil {
		return nil, false
	}
	return NewNormal(mu, &sigma, src)
}
// ConditionNormal returns the Normal distribution that is the receiver conditioned
// on the input evidence. The returned multivariate normal has dimension
// n - len(observed), where n is the dimension of the original receiver. The updated
// mean and covariance are
//
//	mu = mu_un + sigma_{ob,un}ᵀ * sigma_{ob,ob}^-1 (v - mu_ob)
//	sigma = sigma_{un,un} - sigma_{ob,un}ᵀ * sigma_{ob,ob}^-1 * sigma_{ob,un}
//
// where mu_un and mu_ob are the original means of the unobserved and observed
// variables respectively, sigma_{un,un} is the unobserved subset of the covariance
// matrix, sigma_{ob,ob} is the observed subset of the covariance matrix, and
// sigma_{un,ob} are the cross terms. The elements of x_2 have been observed with
// values v. The dimension order is preserved during conditioning, so if the value
// of dimension 1 is observed, the returned normal represents dimensions {0, 2, ...}
// of the original Normal distribution.
//
// ConditionNormal returns {nil, false} if there is a failure during the update.
// Mathematically this is impossible, but can occur with finite precision arithmetic.
func (n *Normal) ConditionNormal(observed []int, values []float64, src rand.Source) (*Normal, bool) {
	if len(observed) == 0 {
		panic("normal: no observed value")
	}
	if len(observed) != len(values) {
		panic(badInputLength)
	}
	for _, v := range observed {
		if v < 0 || v >= n.Dim() {
			panic("normal: observed value out of bounds")
		}
	}
	// The Normal is the ν → ∞ limit of the Student's t distribution, so the
	// shared conditioning helper is called with infinite degrees of freedom.
	_, mu1, sigma11 := studentsTConditional(observed, values, math.Inf(1), n.mu, &n.sigma)
	if mu1 == nil {
		// Conditioning failed numerically (see doc comment above).
		return nil, false
	}
	return NewNormal(mu1, sigma11, src)
}
// CovarianceMatrix stores the covariance matrix of the distribution in dst.
// Upon return, the value at element {i, j} of the covariance matrix is equal
// to the covariance of the i^th and j^th variables.
//
//	covariance(i, j) = E[(x_i - E[x_i])(x_j - E[x_j])]
//
// If the dst matrix is empty it will be resized to the correct dimensions,
// otherwise dst must match the dimension of the receiver or CovarianceMatrix
// will panic.
func (n *Normal) CovarianceMatrix(dst *mat.SymDense) {
	switch {
	case dst.IsEmpty():
		*dst = *(dst.GrowSym(n.dim).(*mat.SymDense))
	case dst.SymmetricDim() != n.dim:
		panic("normal: input matrix size mismatch")
	}
	dst.CopySym(&n.sigma)
}
// Dim returns the dimension of the distribution, that is, the length of its
// mean vector.
func (n *Normal) Dim() int {
	return n.dim
}
// Entropy returns the differential entropy of the distribution,
//
//	H = d/2 * (1 + log(2π)) + 1/2 * log(|Σ|)
//
// where d is the dimension and Σ is the covariance matrix
// (n.logSqrtDet stores 1/2 * log(|Σ|)).
func (n *Normal) Entropy() float64 {
	return float64(n.dim)/2*(1+logTwoPi) + n.logSqrtDet
}
// LogProb computes the log of the pdf of the point x.
//
// LogProb panics if len(x) does not equal the dimension of the distribution.
func (n *Normal) LogProb(x []float64) float64 {
	if len(x) != n.dim {
		panic(badSizeMismatch)
	}
	return normalLogProb(x, n.mu, &n.chol, n.logSqrtDet)
}
// NormalLogProb computes the log probability of the location x for a Normal
// distribution the given mean and Cholesky decomposition of the covariance matrix.
// NormalLogProb panics if len(x) is not equal to len(mu), or if len(mu) != chol.Size().
//
// This function saves time and memory if the Cholesky decomposition is already
// available. Otherwise, the NewNormal function should be used.
func NormalLogProb(x, mu []float64, chol *mat.Cholesky) float64 {
	dim := len(mu)
	if len(x) != dim || chol.SymmetricDim() != dim {
		panic(badSizeMismatch)
	}
	// log(sqrt(|Σ|)) = 0.5*log(|Σ|).
	return normalLogProb(x, mu, chol, 0.5*chol.LogDet())
}
// normalLogProb is the same as NormalLogProb, but does not make size checks and
// additionally requires logSqrtDet, the log of the square root of the
// covariance determinant, i.e. 0.5*log(|Σ|).
func normalLogProb(x, mu []float64, chol *mat.Cholesky, logSqrtDet float64) float64 {
	dim := len(mu)
	// Log normalization constant: -d/2*log(2π) - 0.5*log(|Σ|).
	c := -0.5*float64(dim)*logTwoPi - logSqrtDet
	// Mahalanobis distance sqrt((x-μ)ᵀ Σ^-1 (x-μ)).
	dst := stat.Mahalanobis(mat.NewVecDense(dim, x), mat.NewVecDense(dim, mu), chol)
	return c - 0.5*dst*dst
}
// MarginalNormal returns the marginal distribution of the given input variables.
// That is, MarginalNormal returns
//
//	p(x_i) = \int_{x_o} p(x_i | x_o) p(x_o) dx_o
//
// where x_i are the dimensions in the input, and x_o are the remaining dimensions.
// See https://en.wikipedia.org/wiki/Marginal_distribution for more information.
//
// The input src is passed to the call to NewNormal.
func (n *Normal) MarginalNormal(vars []int, src rand.Source) (*Normal, bool) {
	mean := make([]float64, len(vars))
	for i, idx := range vars {
		mean[i] = n.mu[idx]
	}
	var cov mat.SymDense
	cov.SubsetSym(&n.sigma, vars)
	return NewNormal(mean, &cov, src)
}
// MarginalNormalSingle returns the marginal of the given input variable.
// That is, MarginalNormalSingle returns
//
//	p(x_i) = \int_{x_¬i} p(x_i | x_¬i) p(x_¬i) dx_¬i
//
// where i is the input index.
// See https://en.wikipedia.org/wiki/Marginal_distribution for more information.
//
// The input src is passed to the constructed distuv.Normal.
func (n *Normal) MarginalNormalSingle(i int, src rand.Source) distuv.Normal {
	return distuv.Normal{
		Mu:    n.mu[i],
		Sigma: math.Sqrt(n.sigma.At(i, i)),
		Src:   src,
	}
}
// Mean returns the mean of the probability distribution.
//
// If dst is not nil, the mean will be stored in-place into dst and returned,
// otherwise a new slice will be allocated first. If dst is not nil, it must
// have length equal to the dimension of the distribution.
func (n *Normal) Mean(dst []float64) []float64 {
	dst = reuseAs(dst, n.dim)
	copy(dst, n.mu)
	return dst
}
// Prob computes the value of the probability density function at x.
// It is computed as exp(LogProb(x)) and so panics under the same conditions
// as LogProb.
func (n *Normal) Prob(x []float64) float64 {
	return math.Exp(n.LogProb(x))
}
// Quantile returns the value of the multi-dimensional inverse cumulative
// distribution function at p.
//
// If dst is not nil, the quantile will be stored in-place into dst and
// returned, otherwise a new slice will be allocated first. If dst is not nil,
// it must have length equal to the dimension of the distribution. Quantile will
// also panic if the length of p is not equal to the dimension of the
// distribution.
//
// All of the values of p must be between 0 and 1, inclusive, or Quantile will
// panic.
func (n *Normal) Quantile(dst, p []float64) []float64 {
	if len(p) != n.dim {
		panic(badInputLength)
	}
	dst = reuseAs(dst, n.dim)
	// Map each probability through the univariate standard-normal inverse
	// CDF, then color the standard-normal vector with the receiver's mean
	// and covariance.
	for i := range p {
		dst[i] = distuv.UnitNormal.Quantile(p[i])
	}
	n.TransformNormal(dst, dst)
	return dst
}
// Rand generates a random sample according to the distributon.
//
// If dst is not nil, the sample will be stored in-place into dst and returned,
// otherwise a new slice will be allocated first. If dst is not nil, it must
// have length equal to the dimension of the distribution.
func (n *Normal) Rand(dst []float64) []float64 {
	// Delegate to the free function using the stored Cholesky factor.
	return NormalRand(dst, n.mu, &n.chol, n.src)
}
// NormalRand generates a random sample from a multivariate normal distributon
// given by the mean and the Cholesky factorization of the covariance matrix.
//
// If dst is not nil, the sample will be stored in-place into dst and returned,
// otherwise a new slice will be allocated first. If dst is not nil, it must
// have length equal to the dimension of the distribution.
//
// This function saves time and memory if the Cholesky factorization is already
// available. Otherwise, the NewNormal function should be used.
func NormalRand(dst, mean []float64, chol *mat.Cholesky, src rand.Source) []float64 {
	if len(mean) != chol.SymmetricDim() {
		panic(badInputLength)
	}
	dst = reuseAs(dst, len(mean))
	// Draw i.i.d. standard normal variates, from the global generator when
	// src is nil.
	normFloat := rand.NormFloat64
	if src != nil {
		normFloat = rand.New(src).NormFloat64
	}
	for i := range dst {
		dst[i] = normFloat()
	}
	// Color the standard normal draw and shift by the mean.
	transformNormal(dst, dst, mean, chol)
	return dst
}
// EigenSym is an eigendecomposition of a symmetric matrix, exposing raw
// read-only access to the eigenvalues and eigenvectors.
type EigenSym interface {
	mat.Symmetric
	// RawValues returns all eigenvalues in ascending order. The returned slice
	// must not be modified.
	RawValues() []float64
	// RawQ returns an orthogonal matrix whose columns contain the eigenvectors.
	// The returned matrix must not be modified.
	RawQ() mat.Matrix
}
// PositivePartEigenSym is an EigenSym that sets any negative eigenvalues from
// the given eigendecomposition to zero but otherwise returns the values
// unchanged.
//
// This is useful for filtering eigenvalues of positive semi-definite matrices
// that are almost zero but negative due to rounding errors.
type PositivePartEigenSym struct {
	ed   *mat.EigenSym // wrapped decomposition; all non-value queries delegate to it
	vals []float64     // eigenvalues with negative entries clamped to zero
}

// Compile-time checks that both the wrapper and the wrapped type satisfy EigenSym.
var _ EigenSym = (*PositivePartEigenSym)(nil)
var _ EigenSym = (*mat.EigenSym)(nil)
// NewPositivePartEigenSym returns a new PositivePartEigenSym, wrapping the
// given eigendecomposition.
func NewPositivePartEigenSym(ed *mat.EigenSym) *PositivePartEigenSym {
	// Copy the eigenvalues, clamping negative entries (and NaN) to zero.
	clamped := make([]float64, ed.SymmetricDim())
	for i, v := range ed.RawValues() {
		if v > 0 {
			clamped[i] = v
		}
	}
	return &PositivePartEigenSym{ed: ed, vals: clamped}
}
// SymmetricDim returns the value from the wrapped eigendecomposition.
func (ed *PositivePartEigenSym) SymmetricDim() int { return ed.ed.SymmetricDim() }

// Dims returns the dimensions from the wrapped eigendecomposition.
func (ed *PositivePartEigenSym) Dims() (r, c int) { return ed.ed.Dims() }

// At returns the value from the wrapped eigendecomposition.
// Note that this is the element of the original decomposed matrix, not of the
// clamped reconstruction.
func (ed *PositivePartEigenSym) At(i, j int) float64 { return ed.ed.At(i, j) }

// T returns the transpose from the wrapped eigendecomposition.
func (ed *PositivePartEigenSym) T() mat.Matrix { return ed.ed.T() }

// RawQ returns the orthogonal matrix Q from the wrapped eigendecomposition. The
// returned matrix must not be modified.
func (ed *PositivePartEigenSym) RawQ() mat.Matrix { return ed.ed.RawQ() }

// RawValues returns the eigenvalues from the wrapped eigendecomposition in
// ascending order with any negative value replaced by zero. The returned slice
// must not be modified.
func (ed *PositivePartEigenSym) RawValues() []float64 { return ed.vals }
// NormalRandCov generates a random sample from a multivariate normal
// distribution given by the mean and the covariance matrix.
//
// If dst is not nil, the sample will be stored in-place into dst and returned,
// otherwise a new slice will be allocated first. If dst is not nil, it must
// have length equal to the dimension of the distribution.
//
// cov should be *mat.Cholesky, *mat.PivotedCholesky or EigenSym, otherwise
// NormalRandCov will be very inefficient because a pivoted Cholesky
// factorization of cov will be computed for every sample.
//
// If cov is an EigenSym, all eigenvalues returned by RawValues must be
// non-negative, otherwise NormalRandCov will panic.
func NormalRandCov(dst, mean []float64, cov mat.Symmetric, src rand.Source) []float64 {
	n := len(mean)
	if cov.SymmetricDim() != n {
		panic(badInputLength)
	}
	dst = reuseAs(dst, n)
	// Fill dst with i.i.d. standard normal draws, using the global generator
	// when src is nil.
	if src == nil {
		for i := range dst {
			dst[i] = rand.NormFloat64()
		}
	} else {
		rnd := rand.New(src)
		for i := range dst {
			dst[i] = rnd.NormFloat64()
		}
	}
	// Color the standard normal vector: if z ~ N(0, I) and Σ = A Aᵀ, then
	// A z ~ N(0, Σ). The factor A depends on the concrete type of cov.
	switch cov := cov.(type) {
	case *mat.Cholesky:
		// Σ = Uᵀ U, so color with Uᵀ.
		dstVec := mat.NewVecDense(n, dst)
		dstVec.MulVec(cov.RawU().T(), dstVec)
	case *mat.PivotedCholesky:
		// The factorization is of the permuted matrix, so color with Uᵀ and
		// then undo the column pivoting.
		dstVec := mat.NewVecDense(n, dst)
		dstVec.MulVec(cov.RawU().T(), dstVec)
		dstVec.Permute(cov.ColumnPivots(nil), true)
	case EigenSym:
		// Σ = Q diag(λ) Qᵀ: scale by sqrt(λ), then rotate by Q.
		// RawValues is in ascending order, so checking the first element
		// suffices to detect a negative eigenvalue.
		vals := cov.RawValues()
		if vals[0] < 0 {
			panic("distmv: covariance matrix is not positive semi-definite")
		}
		for i, val := range vals {
			dst[i] *= math.Sqrt(val)
		}
		dstVec := mat.NewVecDense(n, dst)
		dstVec.MulVec(cov.RawQ(), dstVec)
	default:
		// Slow path: factorize cov from scratch for this one sample.
		var chol mat.PivotedCholesky
		chol.Factorize(cov, -1)
		dstVec := mat.NewVecDense(n, dst)
		dstVec.MulVec(chol.RawU().T(), dstVec)
		dstVec.Permute(chol.ColumnPivots(nil), true)
	}
	// Shift the colored draw by the mean.
	floats.Add(dst, mean)
	return dst
}
// ScoreInput returns the gradient of the log-probability with respect to the
// input x. That is, ScoreInput computes
//
//	∇_x log(p(x))
//
// If dst is not nil, the score will be stored in-place into dst and returned,
// otherwise a new slice will be allocated first. If dst is not nil, it must
// have length equal to the dimension of the distribution.
func (n *Normal) ScoreInput(dst, x []float64) []float64 {
	if len(x) != n.Dim() {
		panic(badInputLength)
	}
	dst = reuseAs(dst, n.dim)
	// The log-density is c - 0.5*(x-μ)ᵀ Σ^-1 (x-μ), so the gradient with
	// respect to x is -Σ^-1 (x-μ): compute x-μ, solve against the Cholesky
	// factorization, and negate.
	floats.SubTo(dst, x, n.mu)
	v := mat.NewVecDense(len(dst), dst)
	if err := n.chol.SolveVecTo(v, v); err != nil {
		panic(err)
	}
	floats.Scale(-1, dst)
	return dst
}
// SetMean changes the mean of the normal distribution. SetMean panics if len(mu)
// does not equal the dimension of the normal distribution.
func (n *Normal) SetMean(mu []float64) {
	if len(mu) != n.Dim() {
		panic(badSizeMismatch)
	}
	// The input is copied, so the caller retains ownership of mu.
	copy(n.mu, mu)
}
// TransformNormal transforms x generated from a standard multivariate normal
// into a vector that has been generated under the normal distribution of the
// receiver.
//
// If dst is not nil, the result will be stored in-place into dst and returned,
// otherwise a new slice will be allocated first. If dst is not nil, it must
// have length equal to the dimension of the distribution. TransformNormal will
// also panic if the length of x is not equal to the dimension of the receiver.
func (n *Normal) TransformNormal(dst, x []float64) []float64 {
	if len(x) != n.dim {
		panic(badInputLength)
	}
	dst = reuseAs(dst, n.dim)
	// Color x with the receiver's Cholesky factor and shift by its mean.
	transformNormal(dst, x, n.mu, &n.chol)
	return dst
}
// transformNormal performs the same operation as Normal.TransformNormal except
// no safety checks are performed and all memory must be provided.
// It computes dst = Uᵀ*normal + mu, where Σ = UᵀU is the Cholesky
// factorization, and supports dst and normal aliasing the same slice.
func transformNormal(dst, normal, mu []float64, chol *mat.Cholesky) []float64 {
	dim := len(mu)
	dstVec := mat.NewVecDense(dim, dst)
	srcVec := mat.NewVecDense(dim, normal)
	// If dst and normal are the same slice, make them the same Vector otherwise
	// mat complains about being tricky.
	if &normal[0] == &dst[0] {
		srcVec = dstVec
	}
	dstVec.MulVec(chol.RawU().T(), srcVec)
	floats.Add(dst, mu)
	return dst
}

390
vendor/gonum.org/v1/gonum/stat/distmv/statdist.go generated vendored Normal file
View File

@@ -0,0 +1,390 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distmv
import (
"math"
"gonum.org/v1/gonum/floats"
"gonum.org/v1/gonum/mat"
"gonum.org/v1/gonum/mathext"
"gonum.org/v1/gonum/spatial/r1"
"gonum.org/v1/gonum/stat"
)
// Bhattacharyya is a type for computing the Bhattacharyya distance between
// probability distributions.
//
// The Bhattacharyya distance is defined as
//
//	D_B = -ln(BC(l,r))
//	BC = \int_-∞^∞ (p(x)q(x))^(1/2) dx
//
// Where BC is known as the Bhattacharyya coefficient.
// The Bhattacharyya distance is related to the Hellinger distance by
//
//	H(l,r) = sqrt(1-BC(l,r))
//
// For more information, see
//
//	https://en.wikipedia.org/wiki/Bhattacharyya_distance
//
// The zero value is ready to use; the type carries no state.
type Bhattacharyya struct{}
// DistNormal computes the Bhattacharyya distance between normal distributions l and r.
// The dimensions of the input distributions must match or DistNormal will panic.
//
// For Normal distributions, the Bhattacharyya distance is
//
//	Σ = (Σ_l + Σ_r)/2
//	D_B = (1/8)*(μ_l - μ_r)ᵀ*Σ^-1*(μ_l - μ_r) + (1/2)*ln(det(Σ)/(det(Σ_l)*det(Σ_r))^(1/2))
func (Bhattacharyya) DistNormal(l, r *Normal) float64 {
	dim := l.Dim()
	if r.Dim() != dim {
		panic(badSizeMismatch)
	}
	// Average covariance: Σ = (Σ_l + Σ_r)/2.
	var avg mat.SymDense
	avg.AddSym(&l.sigma, &r.sigma)
	avg.ScaleSym(0.5, &avg)
	var chol mat.Cholesky
	chol.Factorize(&avg)
	m := stat.Mahalanobis(mat.NewVecDense(dim, l.mu), mat.NewVecDense(dim, r.mu), &chol)
	// 0.5*ln(det(Σ)) - 0.25*ln(det(Σ_l)) - 0.25*ln(det(Σ_r)) plus the
	// Mahalanobis term gives D_B.
	return 0.125*m*m + 0.5*chol.LogDet() - 0.25*l.chol.LogDet() - 0.25*r.chol.LogDet()
}
// DistUniform computes the Bhattacharyya distance between uniform distributions l and r.
// The dimensions of the input distributions must match or DistUniform will panic.
func (Bhattacharyya) DistUniform(l, r *Uniform) float64 {
	if len(l.bounds) != len(r.bounds) {
		panic(badSizeMismatch)
	}
	// BC = \int \sqrt(p(x)q(x)), which for uniform distributions is a constant
	// over the volume where both distributions have positive probability.
	// Compute the overlap and the value of sqrt(p(x)q(x)). The entropy is the
	// negative log probability of the distribution (use instead of LogProb so
	// it is not necessary to construct an x value).
	//
	//	BC = volume * sqrt(p(x)q(x))
	//	logBC = log(volume) + 0.5*(logP + logQ)
	//	D_B = -logBC
	return -unifLogVolOverlap(l.bounds, r.bounds) + 0.5*(l.Entropy()+r.Entropy())
}
// unifLogVolOverlap computes the log of the volume of the hyper-rectangle where
// both uniform distributions have positive probability. It returns -∞ when the
// supports are disjoint in any dimension.
func unifLogVolOverlap(b1, b2 []r1.Interval) float64 {
	total := 0.0
	for i, iv1 := range b1 {
		iv2 := b2[i]
		// Overlap in this dimension is [max(mins), min(maxes)].
		lo := math.Max(iv1.Min, iv2.Min)
		hi := math.Min(iv1.Max, iv2.Max)
		if hi <= lo {
			// No (or zero-width) overlap: the total volume is zero.
			return math.Inf(-1)
		}
		total += math.Log(hi - lo)
	}
	return total
}
// CrossEntropy is a type for computing the cross-entropy between probability
// distributions.
//
// The cross-entropy is defined as
//
//	-\int_x l(x) log(r(x)) dx = KL(l || r) + H(l)
//
// where KL is the Kullback-Leibler divergence and H is the entropy.
// For more information, see
//
//	https://en.wikipedia.org/wiki/Cross_entropy
//
// The zero value is ready to use; the type carries no state.
type CrossEntropy struct{}
// DistNormal returns the cross-entropy between normal distributions l and r.
// The dimensions of the input distributions must match or DistNormal will panic.
func (CrossEntropy) DistNormal(l, r *Normal) float64 {
	if l.Dim() != r.Dim() {
		panic(badSizeMismatch)
	}
	// Cross-entropy decomposes as KL(l || r) + H(l).
	return KullbackLeibler{}.DistNormal(l, r) + l.Entropy()
}
// Hellinger is a type for computing the Hellinger distance between probability
// distributions.
//
// The Hellinger distance is defined as
//
//	H^2(l,r) = 1/2 * int_x (\sqrt(l(x)) - \sqrt(r(x)))^2 dx
//
// and is bounded between 0 and 1. Note the above formula defines the squared
// Hellinger distance, while this returns the Hellinger distance itself.
// The Hellinger distance is related to the Bhattacharyya distance by
//
//	H^2 = 1 - exp(-D_B)
//
// For more information, see
//
//	https://en.wikipedia.org/wiki/Hellinger_distance
//
// The zero value is ready to use; the type carries no state.
type Hellinger struct{}
// DistNormal returns the Hellinger distance between normal distributions l and r.
// The dimensions of the input distributions must match or DistNormal will panic.
//
// See the documentation of Bhattacharyya.DistNormal for the formula for Normal
// distributions.
func (Hellinger) DistNormal(l, r *Normal) float64 {
	if l.Dim() != r.Dim() {
		panic(badSizeMismatch)
	}
	// H = sqrt(1 - BC), with BC = exp(-D_B).
	bc := math.Exp(-Bhattacharyya{}.DistNormal(l, r))
	return math.Sqrt(1 - bc)
}
// KullbackLeibler is a type for computing the Kullback-Leibler divergence from l to r.
//
// The Kullback-Leibler divergence is defined as
//
//	D_KL(l || r ) = \int_x p(x) log(p(x)/q(x)) dx
//
// Note that the Kullback-Leibler divergence is not symmetric with respect to
// the order of the input arguments.
//
// The zero value is ready to use; the type carries no state.
type KullbackLeibler struct{}
// DistDirichlet returns the Kullback-Leibler divergence between Dirichlet
// distributions l and r. The dimensions of the input distributions must match
// or DistDirichlet will panic.
//
// For two Dirichlet distributions, the KL divergence is computed as
//
//	D_KL(l || r) = log Γ(α_0_l) - \sum_i log Γ(α_i_l) - log Γ(α_0_r) + \sum_i log Γ(α_i_r)
//	             + \sum_i (α_i_l - α_i_r)(ψ(α_i_l)- ψ(α_0_l))
//
// Where Γ is the gamma function, ψ is the digamma function, and α_0 is the
// sum of the Dirichlet parameters.
func (KullbackLeibler) DistDirichlet(l, r *Dirichlet) float64 {
	// http://bariskurt.com/kullback-leibler-divergence-between-two-dirichlet-and-beta-distributions/
	if l.Dim() != r.Dim() {
		panic(badSizeMismatch)
	}
	lgL0, _ := math.Lgamma(l.sumAlpha)
	lgR0, _ := math.Lgamma(r.sumAlpha)
	digL0 := mathext.Digamma(l.sumAlpha)
	// Accumulate the per-coordinate log-gamma sums and the digamma
	// correction term in a single pass over the parameters.
	var sumLgL, sumLgR, corr float64
	for i, al := range l.alpha {
		ar := r.alpha[i]
		lg, _ := math.Lgamma(al)
		sumLgL += lg
		lg, _ = math.Lgamma(ar)
		sumLgR += lg
		corr += (al - ar) * (mathext.Digamma(al) - digL0)
	}
	return lgL0 - sumLgL - lgR0 + sumLgR + corr
}
// DistNormal returns the KullbackLeibler divergence between normal distributions l and r.
// The dimensions of the input distributions must match or DistNormal will panic.
//
// For two normal distributions, the KL divergence is computed as
//
//	D_KL(l || r) = 0.5*[ln(|Σ_r|) - ln(|Σ_l|) + (μ_l - μ_r)ᵀ*Σ_r^-1*(μ_l - μ_r) + tr(Σ_r^-1*Σ_l)-d]
//
// DistNormal returns NaN if a solve against Σ_r fails (numerically singular
// covariance).
func (KullbackLeibler) DistNormal(l, r *Normal) float64 {
	dim := l.Dim()
	if dim != r.Dim() {
		panic(badSizeMismatch)
	}
	mahalanobis := stat.Mahalanobis(mat.NewVecDense(dim, l.mu), mat.NewVecDense(dim, r.mu), &r.chol)
	mahalanobisSq := mahalanobis * mahalanobis
	// TODO(btracey): Optimize where there is a SolveCholeskySym
	// TODO(btracey): There may be a more efficient way to just compute the trace
	// Compute tr(Σ_r^-1*Σ_l) using the fact that Σ_l = Uᵀ * U
	var u mat.TriDense
	l.chol.UTo(&u)
	var m mat.Dense
	err := r.chol.SolveTo(&m, u.T())
	if err != nil {
		return math.NaN()
	}
	m.Mul(&m, &u)
	tr := mat.Trace(&m)
	// logSqrtDet stores 0.5*ln(|Σ|), so the difference supplies the
	// 0.5*(ln|Σ_r| - ln|Σ_l|) term of the formula.
	return r.logSqrtDet - l.logSqrtDet + 0.5*(mahalanobisSq+tr-float64(l.dim))
}
// DistUniform returns the KullbackLeibler divergence between uniform distributions
// l and r. The dimensions of the input distributions must match or DistUniform
// will panic.
func (KullbackLeibler) DistUniform(l, r *Uniform) float64 {
	bl := l.Bounds(nil)
	br := r.Bounds(nil)
	if len(bl) != len(br) {
		panic(badSizeMismatch)
	}
	// The KL is ∞ if l is not completely contained within r, because then
	// r(x) is zero when l(x) is non-zero for some x.
	for i, v := range bl {
		if v.Min < br[i].Min || br[i].Max < v.Max {
			return math.Inf(1)
		}
	}
	// The KL divergence is finite.
	//
	// KL defines 0*ln(0) = 0, so there is no contribution to KL where l(x) = 0.
	// Inside the region, l(x) and r(x) are constant (uniform distribution), and
	// this constant is integrated over l(x), which integrates out to one.
	// The entropy is -log(p(x)), so log(p) - log(q) = H(r) - H(l).
	return r.Entropy() - l.Entropy()
}
// Renyi is a type for computing the Rényi divergence of order α from l to r.
//
// The Rényi divergence with α > 0, α ≠ 1 is defined as
//
//	D_α(l || r) = 1/(α-1) log(\int_-∞^∞ l(x)^α r(x)^(1-α)dx)
//
// The Rényi divergence has special forms for α = 0 and α = 1. This type does
// not implement α = ∞. For α = 0,
//
//	D_0(l || r) = -log \int_-∞^∞ r(x)1{p(x)>0} dx
//
// that is, the negative log probability under r(x) that l(x) > 0.
// When α = 1, the Rényi divergence is equal to the Kullback-Leibler divergence.
// The Rényi divergence is also equal to half the Bhattacharyya distance when α = 0.5.
//
// The parameter α must be in 0 ≤ α < ∞ or the distance functions will panic.
type Renyi struct {
	// Alpha is the divergence order α described above.
	Alpha float64
}
// DistNormal returns the Rényi divergence between normal distributions l and r.
// The dimensions of the input distributions must match or DistNormal will panic.
//
// For two normal distributions, the Rényi divergence is computed as
//
//	Σ_α = (1-α) Σ_l + αΣ_r
//	D_α(l||r) = α/2 * (μ_l - μ_r)'*Σ_α^-1*(μ_l - μ_r) + 1/(2(1-α))*ln(|Σ_α|/(|Σ_l|^(1-α)*|Σ_r|^α))
//
// For a more nicely formatted version of the formula, see Eq. 15 of
//
//	Kolchinsky, Artemy, and Brendan D. Tracey. "Estimating Mixture Entropy
//	with Pairwise Distances." arXiv preprint arXiv:1706.02419 (2017).
//
// Note that the this formula is for Chernoff divergence, which differs from
// Rényi divergence by a factor of 1-α. Also be aware that most sources in
// the literature report this formula incorrectly.
func (renyi Renyi) DistNormal(l, r *Normal) float64 {
	if renyi.Alpha < 0 {
		panic("renyi: alpha < 0")
	}
	dim := l.Dim()
	if dim != r.Dim() {
		panic(badSizeMismatch)
	}
	if renyi.Alpha == 0 {
		// Normal distributions have full support, so the α = 0 form is zero.
		return 0
	}
	if renyi.Alpha == 1 {
		// α = 1 is the Kullback-Leibler limit.
		return KullbackLeibler{}.DistNormal(l, r)
	}
	logDetL := l.chol.LogDet()
	logDetR := r.chol.LogDet()
	// Σ_α = (1-α)Σ_l + αΣ_r.
	sigA := mat.NewSymDense(dim, nil)
	for i := 0; i < dim; i++ {
		for j := i; j < dim; j++ {
			v := (1-renyi.Alpha)*l.sigma.At(i, j) + renyi.Alpha*r.sigma.At(i, j)
			sigA.SetSym(i, j, v)
		}
	}
	var chol mat.Cholesky
	ok := chol.Factorize(sigA)
	if !ok {
		// Σ_α is not positive definite (possible when α > 1).
		return math.NaN()
	}
	logDetA := chol.LogDet()
	mahalanobis := stat.Mahalanobis(mat.NewVecDense(dim, l.mu), mat.NewVecDense(dim, r.mu), &chol)
	mahalanobisSq := mahalanobis * mahalanobis
	return (renyi.Alpha/2)*mahalanobisSq + 1/(2*(1-renyi.Alpha))*(logDetA-(1-renyi.Alpha)*logDetL-renyi.Alpha*logDetR)
}
// Wasserstein is a type for computing the Wasserstein distance between two
// probability distributions.
//
// The Wasserstein distance is defined as
//
//	W(l,r) := inf 𝔼(||X-Y||_2^2)^1/2
//
// For more information, see
//
//	https://en.wikipedia.org/wiki/Wasserstein_metric
//
// The zero value is ready to use; the type carries no state.
type Wasserstein struct{}
// DistNormal returns the Wasserstein distance between normal distributions l and r.
// The dimensions of the input distributions must match or DistNormal will panic.
//
// The Wasserstein distance for Normal distributions is
//
//	d^2 = ||m_l - m_r||_2^2 + Tr(Σ_l + Σ_r - 2(Σ_l^(1/2)*Σ_r*Σ_l^(1/2))^(1/2))
//
// For more information, see
//
//	http://djalil.chafai.net/blog/2010/04/30/wasserstein-distance-between-two-gaussians/
//
// Note that this returns d^2 as computed above, not its square root.
// DistNormal panics if a matrix square root (PowPSD) fails.
func (Wasserstein) DistNormal(l, r *Normal) float64 {
	dim := l.Dim()
	if dim != r.Dim() {
		panic(badSizeMismatch)
	}
	// Squared Euclidean distance between the means.
	d := floats.Distance(l.mu, r.mu, 2)
	d = d * d
	// Compute Σ_l^(1/2)
	var ssl mat.SymDense
	err := ssl.PowPSD(&l.sigma, 0.5)
	if err != nil {
		panic(err)
	}
	// Compute Σ_l^(1/2)*Σ_r*Σ_l^(1/2)
	var mean mat.Dense
	mean.Mul(&ssl, &r.sigma)
	mean.Mul(&mean, &ssl)
	// Reinterpret as symdense, and take Σ^(1/2)
	meanSym := mat.NewSymDense(dim, mean.RawMatrix().Data)
	err = ssl.PowPSD(meanSym, 0.5)
	if err != nil {
		panic(err)
	}
	// Assemble the trace term of the formula above.
	tr := mat.Trace(&r.sigma)
	tl := mat.Trace(&l.sigma)
	tm := mat.Trace(&ssl)
	return d + tl + tr - 2*tm
}

362
vendor/gonum.org/v1/gonum/stat/distmv/studentst.go generated vendored Normal file
View File

@@ -0,0 +1,362 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distmv
import (
"math"
"sort"
"golang.org/x/exp/rand"
"golang.org/x/tools/container/intsets"
"gonum.org/v1/gonum/floats"
"gonum.org/v1/gonum/mat"
"gonum.org/v1/gonum/stat"
"gonum.org/v1/gonum/stat/distuv"
)
// StudentsT is a multivariate Student's T distribution. It is a distribution over
// ℝ^n with the probability density
//
//	p(y) = (Γ((ν+n)/2) / Γ(ν/2)) * (νπ)^(-n/2) * |Ʃ|^(-1/2) *
//	        (1 + 1/ν * (y-μ)ᵀ * Ʃ^-1 * (y-μ))^(-(ν+n)/2)
//
// where ν is a scalar greater than 2, μ is a vector in ℝ^n, and Ʃ is an n×n
// symmetric positive definite matrix.
//
// In this distribution, ν sets the spread of the distribution, similar to
// the degrees of freedom in a univariate Student's T distribution. As ν → ∞,
// the distribution approaches a multi-variate normal distribution.
// μ is the mean of the distribution, and the covariance is ν/(ν-2)*Ʃ.
//
// See https://en.wikipedia.org/wiki/Student%27s_t-distribution and
// http://users.isy.liu.se/en/rt/roth/student.pdf for more information.
type StudentsT struct {
	nu float64   // degrees-of-freedom parameter ν
	mu []float64 // location (mean) vector μ
	// If src is altered, rnd must be updated.
	src rand.Source
	rnd *rand.Rand
	sigma mat.SymDense // only stored if needed
	chol       mat.Cholesky // factorization of sigma
	lower      mat.TriDense // lower Cholesky factor of sigma
	logSqrtDet float64      // 0.5*log(det(sigma))
	dim        int          // dimension n, equal to len(mu)
}
// NewStudentsT creates a new StudentsT with the given nu, mu, and sigma
// parameters.
//
// NewStudentsT panics if len(mu) == 0, or if len(mu) != sigma.SymmetricDim(). If
// the covariance matrix is not positive-definite, nil is returned and ok is false.
func NewStudentsT(mu []float64, sigma mat.Symmetric, nu float64, src rand.Source) (dist *StudentsT, ok bool) {
	if len(mu) == 0 {
		panic(badZeroDimension)
	}
	dim := sigma.SymmetricDim()
	if len(mu) != dim {
		panic(badSizeMismatch)
	}
	s := StudentsT{
		nu:  nu,
		mu:  append([]float64(nil), mu...), // private copy of the mean
		dim: dim,
		src: src,
	}
	if src != nil {
		s.rnd = rand.New(src)
	}
	if !s.chol.Factorize(sigma) {
		// sigma is not positive definite.
		return nil, false
	}
	s.sigma = *mat.NewSymDense(dim, nil)
	s.sigma.CopySym(sigma)
	s.chol.LTo(&s.lower)
	s.logSqrtDet = 0.5 * s.chol.LogDet()
	return &s, true
}
// ConditionStudentsT returns the Student's T distribution that is the receiver
// conditioned on the input evidence, and the success of the operation.
// The returned Student's T has dimension
// n - len(observed), where n is the dimension of the original receiver. The
// dimension order is preserved during conditioning, so if the value
// of dimension 1 is observed, the returned normal represents dimensions {0, 2, ...}
// of the original Student's T distribution.
//
// ok indicates whether there was a failure during the update. If ok is false
// the operation failed and dist is not usable.
// Mathematically this is impossible, but can occur with finite precision arithmetic.
func (s *StudentsT) ConditionStudentsT(observed []int, values []float64, src rand.Source) (dist *StudentsT, ok bool) {
	switch {
	case len(observed) == 0:
		panic("studentst: no observed value")
	case len(observed) != len(values):
		panic(badInputLength)
	}
	for _, idx := range observed {
		if idx < 0 || idx >= s.dim {
			panic("studentst: observed value out of bounds")
		}
	}
	nu, mean, sigma := studentsTConditional(observed, values, s.nu, s.mu, &s.sigma)
	if mean == nil {
		return nil, false
	}
	return NewStudentsT(mean, sigma, nu, src)
}
// studentsTConditional updates a Student's T distribution based on the observed samples
// (see documentation for the public function). The Gaussian conditional update
// is treated as a special case when nu == math.Inf(1).
//
// It returns {NaN, nil, nil} when the observed-block covariance cannot be
// Cholesky-factorized or when a solve against it fails.
func studentsTConditional(observed []int, values []float64, nu float64, mu []float64, sigma mat.Symmetric) (newNu float64, newMean []float64, newSigma *mat.SymDense) {
	dim := len(mu)
	ob := len(observed)
	unobserved := findUnob(observed, dim)
	unob := len(unobserved)
	if unob == 0 {
		panic("stat: all dimensions observed")
	}
	// Partition the mean into the unobserved part (mu1) and the observed
	// residual values - mu (mu2).
	mu1 := make([]float64, unob)
	for i, v := range unobserved {
		mu1[i] = mu[v]
	}
	mu2 := make([]float64, ob) // really v - mu2
	for i, v := range observed {
		mu2[i] = values[i] - mu[v]
	}
	// Partition the covariance: sigma11 (unobserved block), sigma22
	// (observed block) and sigma21 (cross terms).
	var sigma11, sigma22 mat.SymDense
	sigma11.SubsetSym(sigma, unobserved)
	sigma22.SubsetSym(sigma, observed)
	sigma21 := mat.NewDense(ob, unob, nil)
	for i, r := range observed {
		for j, c := range unobserved {
			v := sigma.At(r, c)
			sigma21.Set(i, j, v)
		}
	}
	var chol mat.Cholesky
	ok := chol.Factorize(&sigma22)
	if !ok {
		return math.NaN(), nil, nil
	}
	// Compute mu_1 + sigma_{2,1}ᵀ * sigma_{2,2}^-1 (v - mu_2).
	v := mat.NewVecDense(ob, mu2)
	var tmp, tmp2 mat.VecDense
	err := chol.SolveVecTo(&tmp, v)
	if err != nil {
		return math.NaN(), nil, nil
	}
	tmp2.MulVec(sigma21.T(), &tmp)
	for i := range mu1 {
		mu1[i] += tmp2.At(i, 0)
	}
	// Compute tmp4 = sigma_{2,1}ᵀ * sigma_{2,2}^-1 * sigma_{2,1}.
	// TODO(btracey): Should this be a method of SymDense?
	var tmp3, tmp4 mat.Dense
	err = chol.SolveTo(&tmp3, sigma21)
	if err != nil {
		return math.NaN(), nil, nil
	}
	tmp4.Mul(sigma21.T(), &tmp3)
	// Compute sigma_{1,1} - tmp4
	// TODO(btracey): If tmp4 can constructed with a method, then this can be
	// replaced with SubSym.
	for i := 0; i < len(unobserved); i++ {
		for j := i; j < len(unobserved); j++ {
			v := sigma11.At(i, j)
			sigma11.SetSym(i, j, v-tmp4.At(i, j))
		}
	}
	// The computed variables are accurate for a Normal.
	if math.IsInf(nu, 1) {
		return nu, mu1, &sigma11
	}
	// For a Student's T the scale matrix must additionally be rescaled and
	// the degrees of freedom increased by the number of observations.
	// Compute beta = (v - mu_2)ᵀ * sigma_{2,2}^-1 * (v - mu_2)ᵀ
	beta := mat.Dot(v, &tmp)
	// Scale the covariance matrix
	sigma11.ScaleSym((nu+beta)/(nu+float64(ob)), &sigma11)
	return nu + float64(ob), mu1, &sigma11
}
// findUnob returns, in ascending order, the indices in [0, dim) that do not
// appear in observed (the complement of the observed set). Duplicate entries
// in observed are collapsed into the set and do not affect the result.
func findUnob(observed []int, dim int) (unobserved []int) {
	var obSet intsets.Sparse
	for _, v := range observed {
		obSet.Insert(v)
	}
	// Scanning 0..dim-1 yields the complement already sorted.
	unobserved = make([]int, 0, dim)
	for i := 0; i < dim; i++ {
		if !obSet.Has(i) {
			unobserved = append(unobserved, i)
		}
	}
	return unobserved
}
// CovarianceMatrix calculates the covariance matrix of the distribution,
// storing the result in dst. Upon return, the value at element {i, j} of the
// covariance matrix is equal to the covariance of the i^th and j^th variables.
//
//	covariance(i, j) = E[(x_i - E[x_i])(x_j - E[x_j])]
//
// If the dst matrix is empty it will be resized to the correct dimensions,
// otherwise dst must match the dimension of the receiver or CovarianceMatrix
// will panic.
func (st *StudentsT) CovarianceMatrix(dst *mat.SymDense) {
	switch {
	case dst.IsEmpty():
		*dst = *(dst.GrowSym(st.dim).(*mat.SymDense))
	case dst.SymmetricDim() != st.dim:
		panic("studentst: input matrix size mismatch")
	}
	dst.CopySym(&st.sigma)
	// The covariance of a Student's T is ν/(ν-2) * Σ.
	dst.ScaleSym(st.nu/(st.nu-2), dst)
}
// Dim returns the dimension of the distribution, that is, the length of its
// mean vector.
func (s *StudentsT) Dim() int {
	return s.dim
}
// LogProb computes the log of the pdf of the point y.
//
// LogProb panics if len(y) does not equal the dimension of the distribution.
func (s *StudentsT) LogProb(y []float64) float64 {
	if len(y) != s.dim {
		panic(badInputLength)
	}
	nu := s.nu
	n := float64(s.dim)
	lg1, _ := math.Lgamma((nu + n) / 2)
	lg2, _ := math.Lgamma(nu / 2)
	// Log normalization constant:
	// log Γ((ν+n)/2) - log Γ(ν/2) - (n/2)*log(νπ) - 0.5*log(|Ʃ|).
	t1 := lg1 - lg2 - n/2*math.Log(nu*math.Pi) - s.logSqrtDet
	// Squared Mahalanobis distance (y-μ)ᵀ Ʃ^-1 (y-μ).
	mahal := stat.Mahalanobis(mat.NewVecDense(len(y), y), mat.NewVecDense(len(s.mu), s.mu), &s.chol)
	mahal *= mahal
	return t1 - ((nu+n)/2)*math.Log(1+mahal/nu)
}
// MarginalStudentsT returns the marginal distribution of the given input variables,
// and the success of the operation.
// That is, MarginalStudentsT returns
//
//	p(x_i) = \int_{x_o} p(x_i | x_o) p(x_o) dx_o
//
// where x_i are the dimensions in the input, and x_o are the remaining dimensions.
// See https://en.wikipedia.org/wiki/Marginal_distribution for more information.
//
// The input src is passed to the created StudentsT.
//
// ok indicates whether there was a failure during the marginalization. If ok is false
// the operation failed and dist is not usable.
// Mathematically this is impossible, but can occur with finite precision arithmetic.
func (s *StudentsT) MarginalStudentsT(vars []int, src rand.Source) (dist *StudentsT, ok bool) {
	mean := make([]float64, len(vars))
	for i, idx := range vars {
		mean[i] = s.mu[idx]
	}
	var cov mat.SymDense
	cov.SubsetSym(&s.sigma, vars)
	return NewStudentsT(mean, &cov, s.nu, src)
}
// MarginalStudentsTSingle returns the marginal distribution of the given input variable.
// That is, MarginalStudentsTSingle returns
//
// p(x_i) = \int_{x_o} p(x_i | x_o) p(x_o) dx_o
//
// where i is the input index, and x_o are the remaining dimensions.
// See https://en.wikipedia.org/wiki/Marginal_distribution for more information.
//
// The input src is passed to the call to NewStudentsT.
func (s *StudentsT) MarginalStudentsTSingle(i int, src rand.Source) distuv.StudentsT {
return distuv.StudentsT{
Mu: s.mu[i],
Sigma: math.Sqrt(s.sigma.At(i, i)),
Nu: s.nu,
Src: src,
}
}
// TODO(btracey): Implement marginal single. Need to modify univariate StudentsT
// to be three-parameter.
// Mean returns the mean of the probability distribution.
//
// If dst is not nil, the mean will be stored in-place into dst and returned,
// otherwise a new slice will be allocated first. If dst is not nil, it must
// have length equal to the dimension of the distribution.
func (s *StudentsT) Mean(dst []float64) []float64 {
	dst = reuseAs(dst, s.dim)
	copy(dst, s.mu)
	return dst
}
// Nu returns the degrees of freedom parameter of the distribution.
func (s *StudentsT) Nu() float64 {
	return s.nu
}
// Prob computes the value of the probability density function at y.
func (s *StudentsT) Prob(y []float64) float64 {
	return math.Exp(s.LogProb(y))
}
// Rand generates a random sample according to the distribution.
//
// If dst is not nil, the sample will be stored in-place into dst and returned,
// otherwise a new slice will be allocated first. If dst is not nil, it must
// have length equal to the dimension of the distribution.
func (s *StudentsT) Rand(dst []float64) []float64 {
	// If Y is distributed according to N(0,Sigma), and U is chi^2 with
	// parameter ν, then
	//  X = mu + Y * sqrt(nu / U)
	// X is distributed according to this distribution.
	// Generate Y: fill dst with standard normal draws...
	dst = reuseAs(dst, s.dim)
	if s.rnd == nil {
		for i := range dst {
			dst[i] = rand.NormFloat64()
		}
	} else {
		for i := range dst {
			dst[i] = s.rnd.NormFloat64()
		}
	}
	// ...and color them with the Cholesky factor of Sigma.
	y := mat.NewVecDense(s.dim, dst)
	y.MulVec(&s.lower, y)
	// Compute mu + Y*sqrt(nu/U).
	// NOTE(review): the normal draws use s.rnd while the chi-squared draw uses
	// s.src — presumably s.rnd wraps s.src; confirm against the constructor.
	u := distuv.ChiSquared{K: s.nu, Src: s.src}.Rand()
	floats.AddScaledTo(dst, s.mu, math.Sqrt(s.nu/u), dst)
	return dst
}

200
vendor/gonum.org/v1/gonum/stat/distmv/uniform.go generated vendored Normal file
View File

@@ -0,0 +1,200 @@
// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distmv
import (
"math"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/spatial/r1"
)
// Uniform represents a multivariate uniform distribution.
type Uniform struct {
	// bounds holds the [Min, Max] support interval for each dimension.
	bounds []r1.Interval
	// dim is the dimension of the distribution (len(bounds)).
	dim int
	// rnd is the random number generator, or nil to use the global source.
	rnd *rand.Rand
}
// NewUniform creates a new uniform distribution with the given bounds.
//
// NewUniform panics if len(bnds) is zero or if any interval has Max < Min.
func NewUniform(bnds []r1.Interval, src rand.Source) *Uniform {
	dim := len(bnds)
	if dim == 0 {
		panic(badZeroDimension)
	}
	for _, b := range bnds {
		if b.Max < b.Min {
			panic("uniform: maximum less than minimum")
		}
	}
	u := &Uniform{
		bounds: make([]r1.Interval, dim),
		dim:    dim,
	}
	// Copy the intervals so the caller cannot mutate the distribution.
	copy(u.bounds, bnds)
	if src != nil {
		u.rnd = rand.New(src)
	}
	return u
}
// NewUnitUniform creates a new Uniform distribution over the dim-dimensional
// unit hypercube. That is, a uniform distribution where each dimension has
// Min = 0 and Max = 1.
//
// NewUnitUniform panics if dim is not positive.
func NewUnitUniform(dim int, src rand.Source) *Uniform {
	if dim <= 0 {
		panic(nonPosDimension)
	}
	u := &Uniform{
		bounds: make([]r1.Interval, dim),
		dim:    dim,
	}
	for i := range u.bounds {
		u.bounds[i] = r1.Interval{Min: 0, Max: 1}
	}
	if src != nil {
		u.rnd = rand.New(src)
	}
	return u
}
// Bounds returns the bounds on the variables of the distribution.
//
// If dst is not nil, the bounds will be stored in-place into dst and returned,
// otherwise a new slice will be allocated first. If dst is not nil, it must
// have length equal to the dimension of the distribution.
func (u *Uniform) Bounds(dst []r1.Interval) []r1.Interval {
	// Parameter renamed to dst to match the doc comment and the dst
	// convention used by the other Uniform methods (Mean, CDF, Rand, ...).
	if dst == nil {
		dst = make([]r1.Interval, u.Dim())
	}
	if len(dst) != u.Dim() {
		panic(badInputLength)
	}
	copy(dst, u.bounds)
	return dst
}
// CDF returns the value of the multidimensional cumulative distribution
// function of the probability distribution at the point x.
//
// If dst is not nil, the value will be stored in-place into dst and returned,
// otherwise a new slice will be allocated first. If dst is not nil, it must
// have length equal to the dimension of the distribution. CDF will also panic
// if the length of x is not equal to the dimension of the distribution.
func (u *Uniform) CDF(dst, x []float64) []float64 {
	if len(x) != u.dim {
		panic(badSizeMismatch)
	}
	dst = reuseAs(dst, u.dim)
	// Each dimension is independent, so the per-dimension CDF is the
	// clamped linear ramp over its interval.
	for i, b := range u.bounds {
		v := x[i]
		switch {
		case v < b.Min:
			dst[i] = 0
		case v > b.Max:
			dst[i] = 1
		default:
			dst[i] = (v - b.Min) / (b.Max - b.Min)
		}
	}
	return dst
}
// Dim returns the dimension of the distribution.
func (u *Uniform) Dim() int {
	return u.dim
}
// Entropy returns the differential entropy of the distribution.
func (u *Uniform) Entropy() float64 {
	// Entropy is log of the volume. A zero-width interval in any dimension
	// yields -Inf.
	var logVol float64
	for _, b := range u.bounds {
		logVol += math.Log(b.Max - b.Min)
	}
	return logVol
}
// LogProb computes the log of the pdf of the point x.
//
// LogProb panics if len(x) is not equal to the dimension of the distribution.
func (u *Uniform) LogProb(x []float64) float64 {
	if len(x) != u.dim {
		panic(badSizeMismatch)
	}
	// The density is 1/volume inside the support and 0 outside, so the
	// log-density is -Σ log(width_i), or -Inf if x is outside any bound.
	var lp float64
	for i, b := range u.bounds {
		if x[i] < b.Min || x[i] > b.Max {
			return math.Inf(-1)
		}
		lp -= math.Log(b.Max - b.Min)
	}
	return lp
}
// Mean returns the mean of the probability distribution.
//
// If dst is not nil, the mean will be stored in-place into dst and returned,
// otherwise a new slice will be allocated first. If dst is not nil, it must
// have length equal to the dimension of the distribution.
func (u *Uniform) Mean(dst []float64) []float64 {
	dst = reuseAs(dst, u.dim)
	// The mean of each dimension is the midpoint of its interval.
	for i, b := range u.bounds {
		dst[i] = (b.Max + b.Min) / 2
	}
	return dst
}
// Prob computes the value of the probability density function at x.
func (u *Uniform) Prob(x []float64) float64 {
	return math.Exp(u.LogProb(x))
}
// Rand generates a random sample according to the distribution.
//
// If dst is not nil, the sample will be stored in-place into dst and returned,
// otherwise a new slice will be allocated first. If dst is not nil, it must
// have length equal to the dimension of the distribution.
func (u *Uniform) Rand(dst []float64) []float64 {
	dst = reuseAs(dst, u.dim)
	// Select the uniform source once, then scale a [0,1) draw into each
	// dimension's interval.
	unif := rand.Float64
	if u.rnd != nil {
		unif = u.rnd.Float64
	}
	for i, b := range u.bounds {
		dst[i] = unif()*(b.Max-b.Min) + b.Min
	}
	return dst
}
// Quantile returns the value of the multi-dimensional inverse cumulative
// distribution function at p.
//
// If dst is not nil, the quantile will be stored in-place into dst and
// returned, otherwise a new slice will be allocated first. If dst is not nil,
// it must have length equal to the dimension of the distribution. Quantile will
// also panic if the length of p is not equal to the dimension of the
// distribution.
//
// All of the values of p must be between 0 and 1, inclusive, or Quantile will
// panic.
func (u *Uniform) Quantile(dst, p []float64) []float64 {
	if len(p) != u.dim {
		panic(badSizeMismatch)
	}
	dst = reuseAs(dst, u.dim)
	// Each dimension is independent, so invert the linear CDF per dimension.
	for i, v := range p {
		if v < 0 || v > 1 {
			panic(badQuantile)
		}
		dst[i] = v*(u.bounds[i].Max-u.bounds[i].Min) + u.bounds[i].Min
	}
	return dst
}

113
vendor/gonum.org/v1/gonum/stat/distuv/alphastable.go generated vendored Normal file
View File

@@ -0,0 +1,113 @@
// Copyright ©2020 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
)
// AlphaStable represents an α-stable distribution with four parameters.
// See https://en.wikipedia.org/wiki/Stable_distribution for more information.
//
// Note that the methods below perform no validation of the parameter ranges.
type AlphaStable struct {
	// Alpha is the stability parameter.
	// It is valid within the range 0 < α ≤ 2.
	Alpha float64
	// Beta is the skewness parameter.
	// It is valid within the range -1 ≤ β ≤ 1.
	Beta float64
	// C is the scale parameter.
	// It is valid when positive.
	C float64
	// Mu is the location parameter.
	Mu float64
	// Src is the source of randomness, or nil to use the global source.
	Src rand.Source
}
// ExKurtosis returns the excess kurtosis of the distribution.
// ExKurtosis returns NaN when Alpha != 2.
func (a AlphaStable) ExKurtosis() float64 {
	// Alpha == 2 is the Gaussian special case; moments beyond the first do
	// not exist otherwise.
	if a.Alpha == 2 {
		return 0
	}
	return math.NaN()
}
// Mean returns the mean of the probability distribution.
// Mean returns NaN when Alpha <= 1.
func (a AlphaStable) Mean() float64 {
	if a.Alpha > 1 {
		return a.Mu
	}
	return math.NaN()
}
// Median returns the median of the distribution.
// Median panics when Beta != 0, because then the median is not analytically
// expressible.
func (a AlphaStable) Median() float64 {
	if a.Beta == 0 {
		return a.Mu
	}
	panic("distuv: cannot compute Median for Beta != 0")
}
// Mode returns the mode of the distribution.
// Mode panics when Beta != 0, because then the mode is not analytically
// expressible.
func (a AlphaStable) Mode() float64 {
	if a.Beta == 0 {
		return a.Mu
	}
	panic("distuv: cannot compute Mode for Beta != 0")
}
// NumParameters returns the number of parameters in the distribution.
func (a AlphaStable) NumParameters() int {
	return 4
}
// Rand returns a random sample drawn from the distribution.
func (a AlphaStable) Rand() float64 {
	// Chambers–Mallows–Stuck style sampling, from
	// https://en.wikipedia.org/wiki/Stable_distribution#Simulation_of_stable_variables
	const halfPi = math.Pi / 2
	// U uniform on (-π/2, π/2), W standard exponential.
	u := Uniform{-halfPi, halfPi, a.Src}.Rand()
	w := Exponential{1, a.Src}.Rand()
	if a.Alpha == 1 {
		// α = 1 requires its own formula; the general expression below is
		// singular there (tan(πα/2) diverges).
		f := halfPi + a.Beta*u
		x := (f*math.Tan(u) - a.Beta*math.Log(halfPi*w*math.Cos(u)/f)) / halfPi
		return a.C*(x+a.Beta*math.Log(a.C)/halfPi) + a.Mu
	}
	zeta := -a.Beta * math.Tan(halfPi*a.Alpha)
	xi := math.Atan(-zeta) / a.Alpha
	f := a.Alpha * (u + xi)
	g := math.Sqrt(1+zeta*zeta) * math.Pow(math.Cos(u-f)/w, 1-a.Alpha) / math.Cos(u)
	x := math.Pow(g, 1/a.Alpha) * math.Sin(f)
	return a.C*x + a.Mu
}
// Skewness returns the skewness of the distribution.
// Skewness returns NaN when Alpha != 2.
func (a AlphaStable) Skewness() float64 {
	if a.Alpha == 2 {
		return 0
	}
	return math.NaN()
}
// StdDev returns the standard deviation of the probability distribution.
func (a AlphaStable) StdDev() float64 {
	return math.Sqrt(a.Variance())
}
// Variance returns the variance of the probability distribution.
// Variance returns +Inf when Alpha != 2.
func (a AlphaStable) Variance() float64 {
	// In the Gaussian case (α = 2) the variance is 2C²; otherwise the second
	// moment does not exist.
	if a.Alpha == 2 {
		return 2 * a.C * a.C
	}
	return math.Inf(1)
}

141
vendor/gonum.org/v1/gonum/stat/distuv/bernoulli.go generated vendored Normal file
View File

@@ -0,0 +1,141 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
)
// Bernoulli represents a random variable whose value is 1 with probability P and
// value of zero with probability 1-P. The value of P must be between 0 and 1.
// More information at https://en.wikipedia.org/wiki/Bernoulli_distribution.
type Bernoulli struct {
	// P is the probability of drawing 1.
	P float64
	// Src is the source of randomness, or nil to use the global source.
	Src rand.Source
}
// CDF computes the value of the cumulative density function at x.
func (b Bernoulli) CDF(x float64) float64 {
	// P(X <= x) steps from 0 to 1-P at x = 0, and to 1 at x = 1.
	switch {
	case x < 0:
		return 0
	case x < 1:
		return 1 - b.P
	default:
		return 1
	}
}
// Entropy returns the entropy of the distribution.
func (b Bernoulli) Entropy() float64 {
	// Handle the degenerate endpoints explicitly to avoid 0*log(0) = NaN.
	if b.P == 0 || b.P == 1 {
		return 0
	}
	q := 1 - b.P
	return -b.P*math.Log(b.P) - q*math.Log(q)
}
// ExKurtosis returns the excess kurtosis of the distribution.
func (b Bernoulli) ExKurtosis() float64 {
	// Division by pq = 0 when P is 0 or 1 yields +Inf.
	pq := b.P * (1 - b.P)
	return (1 - 6*pq) / pq
}
// LogProb computes the natural logarithm of the value of the probability density function at x.
func (b Bernoulli) LogProb(x float64) float64 {
	// Only x = 0 and x = 1 carry probability mass.
	switch x {
	case 0:
		return math.Log(1 - b.P)
	case 1:
		return math.Log(b.P)
	default:
		return math.Inf(-1)
	}
}
// Mean returns the mean of the probability distribution.
func (b Bernoulli) Mean() float64 {
	return b.P
}
// Median returns the median of the probability distribution.
func (b Bernoulli) Median() float64 {
	p := b.P
	switch {
	case p < 0.5:
		return 0
	case p > 0.5:
		return 1
	default:
		// At p = 0.5 any value in [0, 1] is a median; 0.5 is the convention.
		return 0.5
	}
}
// NumParameters returns the number of parameters in the distribution.
func (Bernoulli) NumParameters() int {
	return 1
}
// Prob computes the value of the probability distribution at x.
func (b Bernoulli) Prob(x float64) float64 {
	if x == 0 {
		return 1 - b.P
	}
	if x == 1 {
		return b.P
	}
	return 0
}
// Quantile returns the minimum value of x from amongst all those values whose CDF value exceeds or equals p.
func (b Bernoulli) Quantile(p float64) float64 {
	if p < 0 || 1 < p {
		panic(badPercentile)
	}
	// CDF(0) = 1-P, so any p at or below that maps to 0.
	if p <= 1-b.P {
		return 0
	}
	return 1
}
// Rand returns a random sample drawn from the distribution.
func (b Bernoulli) Rand() float64 {
	var rnd float64
	if b.Src == nil {
		rnd = rand.Float64()
	} else {
		rnd = rand.New(b.Src).Float64()
	}
	if rnd < b.P {
		return 1
	}
	return 0
}
// Skewness returns the skewness of the distribution.
func (b Bernoulli) Skewness() float64 {
	// Undefined (NaN via 0/0 or ±Inf) when P is 0 or 1.
	return (1 - 2*b.P) / math.Sqrt(b.P*(1-b.P))
}
// StdDev returns the standard deviation of the probability distribution.
func (b Bernoulli) StdDev() float64 {
	return math.Sqrt(b.Variance())
}
// Survival returns the survival function (complementary CDF) at x.
func (b Bernoulli) Survival(x float64) float64 {
	if x < 0 {
		return 1
	}
	if x < 1 {
		return b.P
	}
	return 0
}
// Variance returns the variance of the probability distribution.
func (b Bernoulli) Variance() float64 {
	return b.P * (1 - b.P)
}

152
vendor/gonum.org/v1/gonum/stat/distuv/beta.go generated vendored Normal file
View File

@@ -0,0 +1,152 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/mathext"
)
// Beta implements the Beta distribution, a two-parameter continuous distribution
// with support between 0 and 1.
//
// The beta distribution has density function
//
//	x^(α-1) * (1-x)^(β-1) * Γ(α+β) / (Γ(α)*Γ(β))
//
// For more information, see https://en.wikipedia.org/wiki/Beta_distribution
type Beta struct {
	// Alpha is the left shape parameter of the distribution. Alpha must be greater
	// than 0.
	Alpha float64
	// Beta is the right shape parameter of the distribution. Beta must be greater
	// than 0.
	Beta float64
	// Src is the source of randomness, or nil to use the global source.
	Src rand.Source
}
// CDF computes the value of the cumulative distribution function at x.
func (b Beta) CDF(x float64) float64 {
	if x <= 0 {
		return 0
	}
	if x >= 1 {
		return 1
	}
	// The CDF is the regularized incomplete beta function I_x(α, β).
	return mathext.RegIncBeta(b.Alpha, b.Beta, x)
}
// Entropy returns the differential entropy of the distribution.
func (b Beta) Entropy() float64 {
	// Note: zero parameters also trigger this panic, despite the message
	// saying "negative".
	if b.Alpha <= 0 || b.Beta <= 0 {
		panic("beta: negative parameters")
	}
	return mathext.Lbeta(b.Alpha, b.Beta) - (b.Alpha-1)*mathext.Digamma(b.Alpha) -
		(b.Beta-1)*mathext.Digamma(b.Beta) + (b.Alpha+b.Beta-2)*mathext.Digamma(b.Alpha+b.Beta)
}
// ExKurtosis returns the excess kurtosis of the distribution.
func (b Beta) ExKurtosis() float64 {
	num := 6 * ((b.Alpha-b.Beta)*(b.Alpha-b.Beta)*(b.Alpha+b.Beta+1) - b.Alpha*b.Beta*(b.Alpha+b.Beta+2))
	den := b.Alpha * b.Beta * (b.Alpha + b.Beta + 2) * (b.Alpha + b.Beta + 3)
	return num / den
}
// LogProb computes the natural logarithm of the value of the probability
// density function at x.
func (b Beta) LogProb(x float64) float64 {
	if x < 0 || x > 1 {
		return math.Inf(-1)
	}
	// Note: zero parameters also trigger this panic, despite the message
	// saying "negative".
	if b.Alpha <= 0 || b.Beta <= 0 {
		panic("beta: negative parameters")
	}
	lab, _ := math.Lgamma(b.Alpha + b.Beta)
	la, _ := math.Lgamma(b.Alpha)
	lb, _ := math.Lgamma(b.Beta)
	// Guard the boundary terms: when a shape parameter is exactly 1 the
	// corresponding factor is skipped, avoiding 0*log(0) = NaN at x = 0 or 1.
	var lx float64
	if b.Alpha != 1 {
		lx = (b.Alpha - 1) * math.Log(x)
	}
	var l1mx float64
	if b.Beta != 1 {
		l1mx = (b.Beta - 1) * math.Log(1-x)
	}
	return lab - la - lb + lx + l1mx
}
// Mean returns the mean of the probability distribution.
func (b Beta) Mean() float64 {
	return b.Alpha / (b.Alpha + b.Beta)
}
// Mode returns the mode of the distribution.
//
// Mode returns NaN if both parameters are less than or equal to 1 as a special case,
// 0 if only Alpha <= 1 and 1 if only Beta <= 1.
func (b Beta) Mode() float64 {
	if b.Alpha <= 1 {
		if b.Beta <= 1 {
			return math.NaN()
		}
		return 0
	}
	if b.Beta <= 1 {
		return 1
	}
	return (b.Alpha - 1) / (b.Alpha + b.Beta - 2)
}
// NumParameters returns the number of parameters in the distribution.
func (b Beta) NumParameters() int {
	return 2
}
// Prob computes the value of the probability density function at x.
func (b Beta) Prob(x float64) float64 {
	return math.Exp(b.LogProb(x))
}
// Quantile returns the inverse of the cumulative distribution function.
func (b Beta) Quantile(p float64) float64 {
	if p < 0 || p > 1 {
		panic(badPercentile)
	}
	return mathext.InvRegIncBeta(b.Alpha, b.Beta, p)
}
// Rand returns a random sample drawn from the distribution.
func (b Beta) Rand() float64 {
	// If X ~ Gamma(α, 1) and Y ~ Gamma(β, 1) then X/(X+Y) ~ Beta(α, β).
	ga := Gamma{Alpha: b.Alpha, Beta: 1, Src: b.Src}.Rand()
	gb := Gamma{Alpha: b.Beta, Beta: 1, Src: b.Src}.Rand()
	return ga / (ga + gb)
}
// StdDev returns the standard deviation of the probability distribution.
func (b Beta) StdDev() float64 {
	return math.Sqrt(b.Variance())
}
// Survival returns the survival function (complementary CDF) at x.
func (b Beta) Survival(x float64) float64 {
	switch {
	case x <= 0:
		return 1
	case x >= 1:
		return 0
	}
	// 1 - I_x(α, β) = I_{1-x}(β, α), which is more accurate for x near 1.
	return mathext.RegIncBeta(b.Beta, b.Alpha, 1-x)
}
// Variance returns the variance of the probability distribution.
func (b Beta) Variance() float64 {
	return b.Alpha * b.Beta / ((b.Alpha + b.Beta) * (b.Alpha + b.Beta) * (b.Alpha + b.Beta + 1))
}

190
vendor/gonum.org/v1/gonum/stat/distuv/binomial.go generated vendored Normal file
View File

@@ -0,0 +1,190 @@
// Copyright ©2018 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/mathext"
"gonum.org/v1/gonum/stat/combin"
)
// Binomial implements the binomial distribution, a discrete probability distribution
// that expresses the probability of a given number of successful Bernoulli trials
// out of a total of n, each with success probability p.
// The binomial distribution has the density function:
//
//	f(k) = (n choose k) p^k (1-p)^(n-k)
//
// For more information, see https://en.wikipedia.org/wiki/Binomial_distribution.
type Binomial struct {
	// N is the total number of Bernoulli trials. N must be greater than 0.
	N float64
	// P is the probability of success in any given trial. P must be in [0, 1].
	P float64
	// Src is the source of randomness, or nil to use the global source.
	Src rand.Source
}
// CDF computes the value of the cumulative distribution function at x.
func (b Binomial) CDF(x float64) float64 {
	if x < 0 {
		return 0
	}
	if x >= b.N {
		return 1
	}
	x = math.Floor(x)
	// The binomial CDF can be written in terms of the regularized
	// incomplete beta function: P(X <= x) = I_{1-p}(n-x, x+1).
	return mathext.RegIncBeta(b.N-x, x+1, 1-b.P)
}
// ExKurtosis returns the excess kurtosis of the distribution.
func (b Binomial) ExKurtosis() float64 {
	v := b.P * (1 - b.P)
	return (1 - 6*v) / (b.N * v)
}
// LogProb computes the natural logarithm of the value of the probability
// density function at x.
func (b Binomial) LogProb(x float64) float64 {
	// Only integer x in [0, N] carry probability mass.
	if x < 0 || x > b.N || math.Floor(x) != x {
		return math.Inf(-1)
	}
	// NOTE(review): for the degenerate P = 0 or P = 1, x*log(0) can produce
	// 0*(-Inf) = NaN here — confirm whether callers rely on degenerate P.
	lb := combin.LogGeneralizedBinomial(b.N, x)
	return lb + x*math.Log(b.P) + (b.N-x)*math.Log(1-b.P)
}
// Mean returns the mean of the probability distribution.
func (b Binomial) Mean() float64 {
	return b.N * b.P
}
// NumParameters returns the number of parameters in the distribution.
func (Binomial) NumParameters() int {
	return 2
}
// Prob computes the value of the probability density function at x.
func (b Binomial) Prob(x float64) float64 {
	return math.Exp(b.LogProb(x))
}
// Rand returns a random sample drawn from the distribution.
func (b Binomial) Rand() float64 {
	// NUMERICAL RECIPES IN C: THE ART OF SCIENTIFIC COMPUTING (ISBN 0-521-43108-5)
	// p. 295-6
	// http://www.aip.de/groups/soe/local/numres/bookcpdf/c7-3.pdf
	//
	// Three regimes are used: a direct Bernoulli loop for small N, a
	// Poisson-like rejection scheme when the expected count is below 1, and a
	// Cauchy-proposal rejection scheme otherwise.
	runif := rand.Float64
	rexp := rand.ExpFloat64
	if b.Src != nil {
		rnd := rand.New(b.Src)
		runif = rnd.Float64
		rexp = rnd.ExpFloat64
	}
	// Work with p <= 0.5 and mirror the result at the end if needed.
	p := b.P
	if p > 0.5 {
		p = 1 - p
	}
	am := b.N * p
	if b.N < 25 {
		// Use direct method.
		bnl := 0.0
		for i := 0; i < int(b.N); i++ {
			if runif() < p {
				bnl++
			}
		}
		if p != b.P {
			return b.N - bnl
		}
		return bnl
	}
	if am < 1 {
		// Use rejection method with Poisson proposal.
		const logM = 2.6e-2 // constant for rejection sampling (https://en.wikipedia.org/wiki/Rejection_sampling)
		var bnl float64
		z := -p
		pclog := (1 + 0.5*z) * z / (1 + (1+1.0/6*z)*z) // Padé approximant of log(1 + x)
		for {
			bnl = 0.0
			t := 0.0
			// Count exponential arrivals before the budget am is exhausted;
			// this is a Poisson(am) draw.
			for i := 0; i < int(b.N); i++ {
				t += rexp()
				if t >= am {
					break
				}
			}
			bnlc := b.N - bnl
			z = -bnl / b.N
			log1p := (1 + 0.5*z) * z / (1 + (1+1.0/6*z)*z)
			t = (bnlc+0.5)*log1p + bnl - bnlc*pclog + 1/(12*bnlc) - am + logM // Uses Stirling's expansion of log(n!)
			if rexp() >= t {
				break
			}
		}
		if p != b.P {
			return b.N - bnl
		}
		return bnl
	}
	// Original algorithm samples from a Poisson distribution with the
	// appropriate expected value. However, the Poisson approximation is
	// asymptotic such that the absolute deviation in probability is O(1/n).
	// Rejection sampling produces exact variates with at worst less than 3%
	// rejection with minimal additional computation.
	// Use rejection method with Cauchy proposal.
	g, _ := math.Lgamma(b.N + 1)
	plog := math.Log(p)
	pclog := math.Log1p(-p)
	sq := math.Sqrt(2 * am * (1 - p))
	for {
		var em, y float64
		for {
			// Draw a Cauchy proposal centered on am, retrying until it lies
			// within the support [0, N].
			y = math.Tan(math.Pi * runif())
			em = sq*y + am
			if em >= 0 && em < b.N+1 {
				break
			}
		}
		em = math.Floor(em)
		lg1, _ := math.Lgamma(em + 1)
		lg2, _ := math.Lgamma(b.N - em + 1)
		// Acceptance ratio of the binomial pmf to the Cauchy envelope.
		t := 1.2 * sq * (1 + y*y) * math.Exp(g-lg1-lg2+em*plog+(b.N-em)*pclog)
		if runif() <= t {
			if p != b.P {
				return b.N - em
			}
			return em
		}
	}
}
// Skewness returns the skewness of the distribution.
func (b Binomial) Skewness() float64 {
	return (1 - 2*b.P) / b.StdDev()
}
// StdDev returns the standard deviation of the probability distribution.
func (b Binomial) StdDev() float64 {
	return math.Sqrt(b.Variance())
}
// Survival returns the survival function (complementary CDF) at x.
func (b Binomial) Survival(x float64) float64 {
	return 1 - b.CDF(x)
}
// Variance returns the variance of the probability distribution.
func (b Binomial) Variance() float64 {
	return b.N * b.P * (1 - b.P)
}

185
vendor/gonum.org/v1/gonum/stat/distuv/categorical.go generated vendored Normal file
View File

@@ -0,0 +1,185 @@
// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
)
// Categorical is an extension of the Bernoulli distribution where x takes
// values {0, 1, ..., len(w)-1} where w is the weight vector. Categorical must
// be initialized with NewCategorical.
//
// The methods use a value receiver, but mutate shared state: weights and heap
// are slices, so copies of a Categorical share the same backing arrays.
type Categorical struct {
	// weights holds the current (unnormalized) weight of each outcome.
	weights []float64
	// heap is a weight heap.
	//
	// It keeps a heap-organised sum of remaining
	// index weights that are available to be taken
	// from.
	//
	// Each element holds the sum of weights for
	// the corresponding index, plus the sum of
	// its children's weights; the children of
	// an element i can be found at positions
	// 2*(i+1)-1 and 2*(i+1). The root of the
	// weight heap is at element 0.
	//
	// See comments in container/heap for an
	// explanation of the layout of a heap.
	heap []float64
	// src is the source of randomness, or nil to use the global source.
	src rand.Source
}
// NewCategorical constructs a new categorical distribution where the probability
// that x equals i is proportional to w[i]. All of the weights must be
// nonnegative, and at least one of the weights must be positive.
func NewCategorical(w []float64, src rand.Source) Categorical {
	c := Categorical{
		weights: make([]float64, len(w)),
		heap:    make([]float64, len(w)),
		src:     src,
	}
	// ReweightAll validates the weights and builds the weight heap.
	c.ReweightAll(w)
	return c
}
// CDF computes the value of the cumulative density function at x.
func (c Categorical) CDF(x float64) float64 {
	// Sum the weights of all outcomes <= x; c.heap[0] holds the total weight.
	var cdf float64
	for i, w := range c.weights {
		if x < float64(i) {
			break
		}
		cdf += w
	}
	return cdf / c.heap[0]
}
// Entropy returns the entropy of the distribution.
func (c Categorical) Entropy() float64 {
	var ent float64
	for _, w := range c.weights {
		// Zero-weight outcomes contribute 0 (lim p→0 of p log p).
		if w == 0 {
			continue
		}
		p := w / c.heap[0]
		ent += p * math.Log(p)
	}
	return -ent
}
// Len returns the number of values x could possibly take (the length of the
// initial supplied weight vector).
func (c Categorical) Len() int {
	return len(c.weights)
}
// Mean returns the mean of the probability distribution.
func (c Categorical) Mean() float64 {
	var mean float64
	for i, v := range c.weights {
		mean += float64(i) * v
	}
	return mean / c.heap[0]
}
// Prob computes the value of the probability density function at x.
func (c Categorical) Prob(x float64) float64 {
	// Non-integer and out-of-range values carry no mass.
	xi := int(x)
	if float64(xi) != x {
		return 0
	}
	if xi < 0 || xi > len(c.weights)-1 {
		return 0
	}
	return c.weights[xi] / c.heap[0]
}
// LogProb computes the natural logarithm of the value of the probability density function at x.
func (c Categorical) LogProb(x float64) float64 {
	return math.Log(c.Prob(x))
}
// Rand returns a random draw from the categorical distribution.
func (c Categorical) Rand() float64 {
	// Draw a point r uniformly in [0, total weight), then descend the weight
	// heap to find the outcome that owns that point.
	var r float64
	if c.src == nil {
		r = c.heap[0] * rand.Float64()
	} else {
		r = c.heap[0] * rand.New(c.src).Float64()
	}
	i := 1
	last := -1
	left := len(c.weights)
	for {
		if r -= c.weights[i-1]; r <= 0 {
			break // Fall within item i-1.
		}
		i <<= 1 // Move to left child.
		if d := c.heap[i-1]; r > d {
			r -= d
			// r exceeds the left subtree's total weight, so
			// move to the right child; the matching item will
			// be caught by the break above.
			i++
		}
		// Defensive check against a corrupted heap: the walk must make
		// progress and terminate within len(weights) steps.
		if i == last || left < 0 {
			panic("categorical: bad sample")
		}
		last = i
		left--
	}
	return float64(i - 1)
}
// Reweight sets the weight of item idx to w. The input weight must be
// non-negative, and after reweighting at least one of the weights must be
// positive.
func (c Categorical) Reweight(idx int, w float64) {
	if w < 0 {
		panic("categorical: negative weight")
	}
	// Store the new weight; w now holds the decrease (old - new), which is
	// subtracted from every heap node on the path from idx to the root.
	w, c.weights[idx] = c.weights[idx]-w, w
	idx++
	for idx > 0 {
		c.heap[idx-1] -= w
		idx >>= 1
	}
	if c.heap[0] <= 0 {
		panic("categorical: sum of the weights non-positive")
	}
}
// ReweightAll resets the weights of the distribution. ReweightAll panics if
// len(w) != c.Len. All of the weights must be nonnegative, and at least one of
// the weights must be positive.
func (c Categorical) ReweightAll(w []float64) {
	if len(w) != c.Len() {
		panic("categorical: length of the slices do not match")
	}
	for _, v := range w {
		if v < 0 {
			panic("categorical: negative weight")
		}
	}
	copy(c.weights, w)
	c.reset()
}
// reset rebuilds the weight heap from c.weights in a single bottom-up pass.
func (c Categorical) reset() {
	copy(c.heap, c.weights)
	for i := len(c.heap) - 1; i > 0; i-- {
		// Sometimes 1-based counting makes sense.
		c.heap[((i+1)>>1)-1] += c.heap[i]
	}
	// TODO(btracey): Renormalization for weird weights?
	if c.heap[0] <= 0 {
		panic("categorical: sum of the weights non-positive")
	}
}

125
vendor/gonum.org/v1/gonum/stat/distuv/chi.go generated vendored Normal file
View File

@@ -0,0 +1,125 @@
// Copyright ©2021 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/mathext"
)
// Chi implements the χ distribution, a one parameter distribution
// with support on the positive numbers.
//
// The density function is given by
//
//	1/(2^{k/2-1} * Γ(k/2)) * x^{k - 1} * e^{-x^2/2}
//
// For more information, see https://en.wikipedia.org/wiki/Chi_distribution.
type Chi struct {
	// K is the shape parameter, corresponding to the degrees of freedom. Must
	// be greater than 0.
	K float64
	// Src is the source of randomness, or nil to use the global source.
	Src rand.Source
}
// CDF computes the value of the cumulative density function at x.
func (c Chi) CDF(x float64) float64 {
	// If X ~ χ(k) then X² ~ χ²(k), so the CDF is the regularized lower
	// incomplete gamma function P(k/2, x²/2).
	return mathext.GammaIncReg(c.K/2, (x*x)/2)
}
// Entropy returns the differential entropy of the distribution.
func (c Chi) Entropy() float64 {
	lg, _ := math.Lgamma(c.K / 2)
	return lg + 0.5*(c.K-math.Ln2-(c.K-1)*mathext.Digamma(c.K/2))
}
// ExKurtosis returns the excess kurtosis of the distribution.
func (c Chi) ExKurtosis() float64 {
	v := c.Variance()
	s := math.Sqrt(v)
	return 2 / v * (1 - c.Mean()*s*c.Skewness() - v)
}
// LogProb computes the natural logarithm of the value of the probability
// density function at x.
func (c Chi) LogProb(x float64) float64 {
	if x < 0 {
		return math.Inf(-1)
	}
	// NOTE(review): at x = 0 with K = 1 this evaluates 0*log(0) = NaN;
	// confirm whether the boundary needs special-casing.
	lg, _ := math.Lgamma(c.K / 2)
	return (c.K-1)*math.Log(x) - (x*x)/2 - (c.K/2-1)*math.Ln2 - lg
}
// Mean returns the mean of the probability distribution.
func (c Chi) Mean() float64 {
	// Mean = sqrt(2) Γ((k+1)/2) / Γ(k/2), evaluated in log space for
	// numerical stability.
	lg1, _ := math.Lgamma((c.K + 1) / 2)
	lg, _ := math.Lgamma(c.K / 2)
	return math.Sqrt2 * math.Exp(lg1-lg)
}
// Median returns the median of the distribution.
func (c Chi) Median() float64 {
	return c.Quantile(0.5)
}
// Mode returns the mode of the distribution.
//
// Mode returns NaN if K is less than one.
func (c Chi) Mode() float64 {
	return math.Sqrt(c.K - 1)
}
// NumParameters returns the number of parameters in the distribution.
func (c Chi) NumParameters() int {
	return 1
}
// Prob computes the value of the probability density function at x.
func (c Chi) Prob(x float64) float64 {
	return math.Exp(c.LogProb(x))
}
// Rand returns a random sample drawn from the distribution.
func (c Chi) Rand() float64 {
	// A χ variate is the square root of a χ² variate, which is Gamma(k/2, 1/2).
	return math.Sqrt(Gamma{c.K / 2, 0.5, c.Src}.Rand())
}
// Quantile returns the inverse of the cumulative distribution function.
func (c Chi) Quantile(p float64) float64 {
	if p < 0 || 1 < p {
		panic(badPercentile)
	}
	return math.Sqrt(2 * mathext.GammaIncRegInv(0.5*c.K, p))
}
// Skewness returns the skewness of the distribution.
func (c Chi) Skewness() float64 {
	v := c.Variance()
	s := math.Sqrt(v)
	return c.Mean() / (s * v) * (1 - 2*v)
}
// StdDev returns the standard deviation of the probability distribution.
func (c Chi) StdDev() float64 {
	return math.Sqrt(c.Variance())
}
// Survival returns the survival function (complementary CDF) at x.
func (c Chi) Survival(x float64) float64 {
	if x < 0 {
		return 1
	}
	return mathext.GammaIncRegComp(0.5*c.K, 0.5*(x*x))
}
// Variance returns the variance of the probability distribution.
func (c Chi) Variance() float64 {
	// Variance = k - mean²; clamp at 0 against floating-point cancellation.
	m := c.Mean()
	return math.Max(0, c.K-m*m)
}

102
vendor/gonum.org/v1/gonum/stat/distuv/chisquared.go generated vendored Normal file
View File

@@ -0,0 +1,102 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/mathext"
)
// ChiSquared implements the χ² distribution, a one parameter distribution
// with support on the positive numbers.
//
// The density function is given by
//
//	1/(2^{k/2} * Γ(k/2)) * x^{k/2 - 1} * e^{-x/2}
//
// It is a special case of the Gamma distribution, Γ(k/2, 1/2).
//
// For more information, see https://en.wikipedia.org/wiki/Chi-squared_distribution.
type ChiSquared struct {
	// K is the shape parameter, corresponding to the degrees of freedom. Must
	// be greater than 0.
	K float64
	// Src is the source of randomness, or nil to use the global source.
	Src rand.Source
}
// CDF computes the value of the cumulative density function at x.
func (c ChiSquared) CDF(x float64) float64 {
	// The CDF is the regularized lower incomplete gamma function P(k/2, x/2).
	return mathext.GammaIncReg(c.K/2, x/2)
}
// ExKurtosis returns the excess kurtosis of the distribution.
func (c ChiSquared) ExKurtosis() float64 {
	return 12 / c.K
}
// LogProb computes the natural logarithm of the value of the probability
// density function at x.
func (c ChiSquared) LogProb(x float64) float64 {
	if x < 0 {
		return math.Inf(-1)
	}
	lg, _ := math.Lgamma(c.K / 2)
	return (c.K/2-1)*math.Log(x) - x/2 - (c.K/2)*math.Ln2 - lg
}
// Mean returns the mean of the probability distribution.
func (c ChiSquared) Mean() float64 {
	return c.K
}
// Mode returns the mode of the distribution.
func (c ChiSquared) Mode() float64 {
	// The mode is k-2, clamped at 0 for k < 2.
	return math.Max(c.K-2, 0)
}
// NumParameters returns the number of parameters in the distribution.
func (c ChiSquared) NumParameters() int {
	return 1
}
// Prob computes the value of the probability density function at x.
func (c ChiSquared) Prob(x float64) float64 {
	return math.Exp(c.LogProb(x))
}
// Rand returns a random sample drawn from the distribution.
func (c ChiSquared) Rand() float64 {
	// χ²(k) is the Gamma(k/2, 1/2) distribution.
	return Gamma{c.K / 2, 0.5, c.Src}.Rand()
}
// Quantile returns the inverse of the cumulative distribution function.
func (c ChiSquared) Quantile(p float64) float64 {
	if p < 0 || p > 1 {
		panic(badPercentile)
	}
	return mathext.GammaIncRegInv(0.5*c.K, p) * 2
}
// StdDev returns the standard deviation of the probability distribution.
func (c ChiSquared) StdDev() float64 {
	return math.Sqrt(c.Variance())
}
// Survival returns the survival function (complementary CDF) at x.
func (c ChiSquared) Survival(x float64) float64 {
	if x < 0 {
		return 1
	}
	return mathext.GammaIncRegComp(0.5*c.K, 0.5*x)
}
// Variance returns the variance of the probability distribution.
func (c ChiSquared) Variance() float64 {
	return 2 * c.K
}

28
vendor/gonum.org/v1/gonum/stat/distuv/constants.go generated vendored Normal file
View File

@@ -0,0 +1,28 @@
// Copyright ©2014 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
const (
	// oneOverRoot2Pi is the value of 1/(2*Pi)^(1/2).
	// http://www.wolframalpha.com/input/?i=1%2F%282+*+pi%29%5E%281%2F2%29
	oneOverRoot2Pi = 0.39894228040143267793994605993438186847585863116493465766592582967065792589930183850125233390730693643030255886263518268

	// logRoot2Pi is the value of log(sqrt(2*Pi)).
	logRoot2Pi    = 0.91893853320467274178032973640561763986139747363778341281715154048276569592726039769474329863595419762200564662463433744
	negLogRoot2Pi = -logRoot2Pi

	// log2Pi is the value of log(2*Pi).
	log2Pi = 1.8378770664093454835606594728112352797227949472755668

	// ln2 is the value of log(2).
	ln2 = 0.69314718055994530941723212145817656807550013436025525412068000949339362196969471560586332699641868754200148102057068573368552023

	// eulerGamma is the Euler-Mascheroni constant.
	eulerGamma = 0.5772156649015328606065120900824024310421593359399235988057672348848677267776646709369470632917467495146314472498070824809605

	// sqrt3 is the value of sqrt(3).
	// https://www.wolframalpha.com/input/?i=sqrt%283%29
	sqrt3 = 1.7320508075688772935274463415058723669428052538103806280558069794519330169088000370811461867572485756756261414154067030299699450
)

// panicNameMismatch is the panic message used by setParameters
// implementations when a Parameter's Name does not match the expected name.
const (
	panicNameMismatch = "parameter name mismatch"
)

6
vendor/gonum.org/v1/gonum/stat/distuv/doc.go generated vendored Normal file
View File

@@ -0,0 +1,6 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package distuv provides univariate random distribution types.
package distuv // import "gonum.org/v1/gonum/stat/distuv"

267
vendor/gonum.org/v1/gonum/stat/distuv/exponential.go generated vendored Normal file
View File

@@ -0,0 +1,267 @@
// Copyright ©2014 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/floats"
"gonum.org/v1/gonum/stat"
)
// Exponential represents the exponential distribution (https://en.wikipedia.org/wiki/Exponential_distribution).
type Exponential struct {
	// Rate is the rate parameter (λ) of the distribution. The formulas in
	// the methods assume Rate > 0.
	Rate float64
	// Src is the source of random numbers used by Rand. If Src is nil,
	// Rand uses the global random source.
	Src rand.Source
}
// CDF computes the value of the cumulative density function at x.
func (e Exponential) CDF(x float64) float64 {
	if x < 0 {
		return 0
	}
	// 1 - exp(-rate*x), using Expm1 for accuracy when rate*x is small.
	return -math.Expm1(-e.Rate * x)
}

// ConjugateUpdate updates the parameters of the distribution from the sufficient
// statistics of a set of samples. The sufficient statistics, suffStat, have been
// observed with nSamples observations. The prior values of the distribution are those
// currently in the distribution, and have been observed with priorStrength samples.
//
// For the exponential distribution, the sufficient statistic is the inverse of
// the mean of the samples. The prior is having seen priorStrength[0] samples
// with inverse mean Exponential.Rate. As a result of this function,
// Exponential.Rate is updated based on the weighted samples, and priorStrength
// is modified to include the new number of samples observed.
//
// This function panics if len(suffStat) != e.NumSuffStat() or
// len(priorStrength) != e.NumSuffStat().
func (e *Exponential) ConjugateUpdate(suffStat []float64, nSamples float64, priorStrength []float64) {
	if len(suffStat) != e.NumSuffStat() {
		panic("exponential: incorrect suffStat length")
	}
	if len(priorStrength) != e.NumSuffStat() {
		panic("exponential: incorrect priorStrength length")
	}
	// nSamples/suffStat[0] recovers the sum of the new samples, since the
	// sufficient statistic is the inverse of their mean.
	totalSamples := nSamples + priorStrength[0]
	totalSum := nSamples / suffStat[0]
	if priorStrength[0] != 0 {
		// Add the pseudo-sum contributed by the prior observations.
		totalSum += priorStrength[0] / e.Rate
	}
	e.Rate = totalSamples / totalSum
	priorStrength[0] = totalSamples
}

// Entropy returns the entropy of the distribution.
func (e Exponential) Entropy() float64 {
	return 1 - math.Log(e.Rate)
}

// ExKurtosis returns the excess kurtosis of the distribution.
func (Exponential) ExKurtosis() float64 {
	return 6
}

// Fit sets the parameters of the probability distribution from the
// data samples x with relative weights w.
// If weights is nil, then all the weights are 1.
// If weights is not nil, then the len(weights) must equal len(samples).
func (e *Exponential) Fit(samples, weights []float64) {
	suffStat := make([]float64, e.NumSuffStat())
	n := e.SuffStat(suffStat, samples, weights)
	// A zero prior strength makes the fit depend only on the samples.
	e.ConjugateUpdate(suffStat, n, make([]float64, e.NumSuffStat()))
}

// LogProb computes the natural logarithm of the value of the probability density function at x.
func (e Exponential) LogProb(x float64) float64 {
	if x < 0 {
		// No support below zero.
		return math.Inf(-1)
	}
	return math.Log(e.Rate) - e.Rate*x
}

// Mean returns the mean of the probability distribution.
func (e Exponential) Mean() float64 {
	return 1 / e.Rate
}

// Median returns the median of the probability distribution.
func (e Exponential) Median() float64 {
	return math.Ln2 / e.Rate
}

// Mode returns the mode of the probability distribution.
func (Exponential) Mode() float64 {
	return 0
}

// NumParameters returns the number of parameters in the distribution.
func (Exponential) NumParameters() int {
	return 1
}

// NumSuffStat returns the number of sufficient statistics for the distribution.
func (Exponential) NumSuffStat() int {
	return 1
}

// Prob computes the value of the probability density function at x.
func (e Exponential) Prob(x float64) float64 {
	return math.Exp(e.LogProb(x))
}

// Quantile returns the inverse of the cumulative probability distribution.
func (e Exponential) Quantile(p float64) float64 {
	if p < 0 || p > 1 {
		panic(badPercentile)
	}
	return -math.Log(1-p) / e.Rate
}

// Rand returns a random sample drawn from the distribution.
func (e Exponential) Rand() float64 {
	if e.Src == nil {
		return rand.ExpFloat64() / e.Rate
	}
	return rand.New(e.Src).ExpFloat64() / e.Rate
}
// Score returns the score function with respect to the parameters of the
// distribution at the input location x. The score function is the derivative
// of the log-likelihood at x with respect to the parameters
//
// (∂/∂θ) log(p(x;θ))
//
// If deriv is non-nil, len(deriv) must equal the number of parameters otherwise
// Score will panic, and the derivative is stored in-place into deriv. If deriv
// is nil a new slice will be allocated and returned.
//
// The order is [∂LogProb / ∂Rate].
//
// For more information, see https://en.wikipedia.org/wiki/Score_%28statistics%29.
//
// Special cases:
//
//	Score(0) = [NaN]
func (e Exponential) Score(deriv []float64, x float64) []float64 {
	if deriv == nil {
		deriv = make([]float64, e.NumParameters())
	}
	if len(deriv) != e.NumParameters() {
		panic(badLength)
	}
	switch {
	case x > 0:
		deriv[0] = 1/e.Rate - x
	case x < 0:
		deriv[0] = 0
	default:
		// x == 0 (or NaN): derivative is undefined.
		deriv[0] = math.NaN()
	}
	return deriv
}

// ScoreInput returns the score function with respect to the input of the
// distribution at the input location specified by x. The score function is the
// derivative of the log-likelihood
//
// (d/dx) log(p(x)) .
//
// Special cases:
//
//	ScoreInput(0) = NaN
func (e Exponential) ScoreInput(x float64) float64 {
	switch {
	case x > 0:
		return -e.Rate
	case x < 0:
		return 0
	}
	return math.NaN()
}

// Skewness returns the skewness of the distribution.
func (Exponential) Skewness() float64 {
	return 2
}

// StdDev returns the standard deviation of the probability distribution.
func (e Exponential) StdDev() float64 {
	return 1 / e.Rate
}

// SuffStat computes the sufficient statistics of set of samples to update
// the distribution. The sufficient statistics are stored in place, and the
// effective number of samples are returned.
//
// The exponential distribution has one sufficient statistic, the average rate
// of the samples.
//
// If weights is nil, the weights are assumed to be 1, otherwise panics if
// len(samples) != len(weights). Panics if len(suffStat) != NumSuffStat().
func (Exponential) SuffStat(suffStat, samples, weights []float64) (nSamples float64) {
	if len(weights) != 0 && len(samples) != len(weights) {
		panic(badLength)
	}
	if len(suffStat) != (Exponential{}).NumSuffStat() {
		panic(badSuffStat)
	}
	nSamples = float64(len(samples))
	if len(weights) != 0 {
		nSamples = floats.Sum(weights)
	}
	// The single sufficient statistic is the inverse of the weighted mean.
	suffStat[0] = 1 / stat.Mean(samples, weights)
	return nSamples
}

// Survival returns the survival function (complementary CDF) at x.
func (e Exponential) Survival(x float64) float64 {
	if x < 0 {
		return 1
	}
	return math.Exp(-e.Rate * x)
}

// setParameters modifies the parameters of the distribution.
func (e *Exponential) setParameters(p []Parameter) {
	if len(p) != e.NumParameters() {
		panic("exponential: incorrect number of parameters to set")
	}
	if p[0].Name != "Rate" {
		panic("exponential: " + panicNameMismatch)
	}
	e.Rate = p[0].Value
}

// Variance returns the variance of the probability distribution.
func (e Exponential) Variance() float64 {
	return 1 / (e.Rate * e.Rate)
}

// parameters returns the parameters of the distribution.
func (e Exponential) parameters(p []Parameter) []Parameter {
	nParam := e.NumParameters()
	if p == nil {
		p = make([]Parameter, nParam)
	} else if len(p) != nParam {
		panic("exponential: improper parameter length")
	}
	p[0].Name = "Rate"
	p[0].Value = e.Rate
	return p
}

135
vendor/gonum.org/v1/gonum/stat/distuv/f.go generated vendored Normal file
View File

@@ -0,0 +1,135 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/mathext"
)
// F implements the F-distribution, a two-parameter continuous distribution
// with support over the positive real numbers.
//
// The F-distribution has density function
//
// sqrt(((d1*x)^d1) * d2^d2 / ((d1*x+d2)^(d1+d2))) / (x * B(d1/2,d2/2))
//
// where B is the beta function.
//
// For more information, see https://en.wikipedia.org/wiki/F-distribution
type F struct {
	D1 float64 // Degrees of freedom for the numerator
	D2 float64 // Degrees of freedom for the denominator
	// Src is the source of random numbers used by Rand. If Src is nil,
	// Rand draws from the global random source (via the ChiSquared sampler).
	Src rand.Source
}
// CDF computes the value of the cumulative density function at x.
func (f F) CDF(x float64) float64 {
	// The CDF is the regularized incomplete beta function
	// I_{d1 x/(d1 x + d2)}(d1/2, d2/2).
	return mathext.RegIncBeta(f.D1/2, f.D2/2, f.D1*x/(f.D1*x+f.D2))
}

// ExKurtosis returns the excess kurtosis of the distribution.
//
// ExKurtosis returns NaN if the D2 parameter is less or equal to 8.
func (f F) ExKurtosis() float64 {
	if f.D2 <= 8 {
		return math.NaN()
	}
	t1 := (5*f.D2 - 22) / (f.D2 - 8)
	t2 := ((f.D2 - 4) / f.D1) * ((f.D2 - 2) / (f.D2 - 8)) * ((f.D2 - 2) / (f.D1 + f.D2 - 2))
	return (12 / (f.D2 - 6)) * (t1 + t2)
}

// LogProb computes the natural logarithm of the value of the probability
// density function at x.
func (f F) LogProb(x float64) float64 {
	d1, d2 := f.D1, f.D2
	// Logarithm of sqrt((d1 x)^d1 d2^d2 / (d1 x + d2)^(d1+d2)).
	logNum := d1*math.Log(d1*x) + d2*math.Log(d2) - (d1+d2)*math.Log(d1*x+d2)
	return 0.5*logNum - math.Log(x) - mathext.Lbeta(d1/2, d2/2)
}

// Mean returns the mean of the probability distribution.
//
// Mean returns NaN if the D2 parameter is less than or equal to 2.
func (f F) Mean() float64 {
	if f.D2 <= 2 {
		return math.NaN()
	}
	return f.D2 / (f.D2 - 2)
}

// Mode returns the mode of the distribution.
//
// Mode returns NaN if the D1 parameter is less than or equal to 2.
func (f F) Mode() float64 {
	if f.D1 <= 2 {
		return math.NaN()
	}
	return ((f.D1 - 2) / f.D1) * (f.D2 / (f.D2 + 2))
}

// NumParameters returns the number of parameters in the distribution.
func (f F) NumParameters() int {
	return 2
}

// Prob computes the value of the probability density function at x.
func (f F) Prob(x float64) float64 {
	return math.Exp(f.LogProb(x))
}

// Quantile returns the inverse of the cumulative distribution function.
func (f F) Quantile(p float64) float64 {
	if p < 0 || p > 1 {
		panic(badPercentile)
	}
	// Invert through the regularized incomplete beta function and map back.
	y := mathext.InvRegIncBeta(0.5*f.D1, 0.5*f.D2, p)
	return f.D2 * y / (f.D1 * (1 - y))
}

// Rand returns a random sample drawn from the distribution.
func (f F) Rand() float64 {
	// Ratio of two independent scaled χ² variates. The draw order matters
	// for reproducibility with a fixed Src: numerator first.
	num := ChiSquared{f.D1, f.Src}.Rand() / f.D1
	den := ChiSquared{f.D2, f.Src}.Rand() / f.D2
	return num / den
}

// Skewness returns the skewness of the distribution.
//
// Skewness returns NaN if the D2 parameter is less than or equal to 6.
func (f F) Skewness() float64 {
	if f.D2 <= 6 {
		return math.NaN()
	}
	num := (2*f.D1 + f.D2 - 2) * math.Sqrt(8*(f.D2-4))
	den := (f.D2 - 6) * math.Sqrt(f.D1*(f.D1+f.D2-2))
	return num / den
}

// StdDev returns the standard deviation of the probability distribution.
//
// StdDev returns NaN if the D2 parameter is less than or equal to 4.
func (f F) StdDev() float64 {
	if f.D2 <= 4 {
		return math.NaN()
	}
	return math.Sqrt(f.Variance())
}

// Survival returns the survival function (complementary CDF) at x.
func (f F) Survival(x float64) float64 {
	return 1 - f.CDF(x)
}

// Variance returns the variance of the probability distribution.
//
// Variance returns NaN if the D2 parameter is less than or equal to 4.
func (f F) Variance() float64 {
	if f.D2 <= 4 {
		return math.NaN()
	}
	num := 2 * f.D2 * f.D2 * (f.D1 + f.D2 - 2)
	den := f.D1 * (f.D2 - 2) * (f.D2 - 2) * (f.D2 - 4)
	return num / den
}

201
vendor/gonum.org/v1/gonum/stat/distuv/gamma.go generated vendored Normal file
View File

@@ -0,0 +1,201 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/mathext"
)
// Gamma implements the Gamma distribution, a two-parameter continuous distribution
// with support over the positive real numbers.
//
// The gamma distribution has density function
//
// β^α / Γ(α) x^(α-1)e^(-βx)
//
// For more information, see https://en.wikipedia.org/wiki/Gamma_distribution
type Gamma struct {
	// Alpha is the shape parameter of the distribution. Alpha must be greater
	// than 0. If Alpha == 1, this is equivalent to an exponential distribution.
	Alpha float64
	// Beta is the rate parameter of the distribution. Beta must be greater than 0.
	// If Beta == 2, this is equivalent to a Chi-Squared distribution.
	Beta float64
	// Src is the source of random numbers used by Rand. If Src is nil,
	// Rand uses the global random source.
	Src rand.Source
}
// CDF computes the value of the cumulative distribution function at x.
func (g Gamma) CDF(x float64) float64 {
	if x < 0 {
		return 0
	}
	// Regularized lower incomplete gamma function P(α, βx).
	return mathext.GammaIncReg(g.Alpha, g.Beta*x)
}

// ExKurtosis returns the excess kurtosis of the distribution.
func (g Gamma) ExKurtosis() float64 {
	return 6 / g.Alpha
}

// LogProb computes the natural logarithm of the value of the probability
// density function at x.
func (g Gamma) LogProb(x float64) float64 {
	if x <= 0 {
		// No support at or below zero.
		return math.Inf(-1)
	}
	lg, _ := math.Lgamma(g.Alpha)
	return g.Alpha*math.Log(g.Beta) - lg + (g.Alpha-1)*math.Log(x) - g.Beta*x
}

// Mean returns the mean of the probability distribution.
func (g Gamma) Mean() float64 {
	return g.Alpha / g.Beta
}

// Mode returns the mode of the normal distribution.
//
// The mode is NaN in the special case where the Alpha (shape) parameter
// is less than 1.
func (g Gamma) Mode() float64 {
	if g.Alpha < 1 {
		return math.NaN()
	}
	return (g.Alpha - 1) / g.Beta
}

// NumParameters returns the number of parameters in the distribution.
func (Gamma) NumParameters() int {
	return 2
}

// Prob computes the value of the probability density function at x.
func (g Gamma) Prob(x float64) float64 {
	return math.Exp(g.LogProb(x))
}

// Quantile returns the inverse of the cumulative distribution function.
func (g Gamma) Quantile(p float64) float64 {
	if p < 0 || p > 1 {
		panic(badPercentile)
	}
	return mathext.GammaIncRegInv(g.Alpha, p) / g.Beta
}
// Rand returns a random sample drawn from the distribution.
//
// Rand panics if either alpha or beta is <= 0.
func (g Gamma) Rand() float64 {
	const (
		// The 0.2 threshold is from https://www4.stat.ncsu.edu/~rmartin/Codes/rgamss.R
		// described in detail in https://arxiv.org/abs/1302.1884.
		smallAlphaThresh = 0.2
	)
	if g.Beta <= 0 {
		panic("gamma: beta <= 0")
	}

	// Bind the uniform/exponential/normal generators either to the global
	// source or to the distribution's own source.
	unifrnd := rand.Float64
	exprnd := rand.ExpFloat64
	normrnd := rand.NormFloat64
	if g.Src != nil {
		rnd := rand.New(g.Src)
		unifrnd = rnd.Float64
		exprnd = rnd.ExpFloat64
		normrnd = rnd.NormFloat64
	}

	a := g.Alpha
	b := g.Beta
	switch {
	case a <= 0:
		panic("gamma: alpha <= 0")
	case a == 1:
		// Gamma(1, β) is exponential with rate β; generate from exponential.
		return exprnd() / b
	case a < smallAlphaThresh:
		// Generate using
		// Liu, Chuanhai, Martin, Ryan and Syring, Nick. "Simulating from a
		// gamma distribution with small shape parameter"
		// https://arxiv.org/abs/1302.1884
		// (see also http://link.springer.com/article/10.1007/s00180-016-0692-0).
		// Algorithm adjusted to work in log space as much as possible.
		lambda := 1/a - 1
		lr := -math.Log1p(1 / lambda / math.E)
		for {
			// Rejection loop: propose z from a two-sided envelope, accept
			// with log-probability lh - lEta.
			e := exprnd()
			var z float64
			if e >= -lr {
				z = e + lr
			} else {
				z = -exprnd() / lambda
			}
			eza := math.Exp(-z / a)
			lh := -z - eza
			var lEta float64
			if z >= 0 {
				lEta = -z
			} else {
				lEta = -1 + lambda*z
			}
			if lh-lEta > -exprnd() {
				// Accepted: exp(-z/a) is the Gamma(a, 1) variate; scale by 1/b.
				return eza / b
			}
		}
	case a >= smallAlphaThresh:
		// Generate using:
		// Marsaglia, George, and Wai Wan Tsang. "A simple method for generating
		// gamma variables." ACM Transactions on Mathematical Software (TOMS)
		// 26.3 (2000): 363-372.
		d := a - 1.0/3
		m := 1.0
		if a < 1 {
			// a < 1 adjustment: sample with shape a+1 and rescale by
			// U^(1/a) — presumably the boosting step from the paper; verify.
			d += 1.0
			m = math.Pow(unifrnd(), 1/a)
		}
		c := 1 / (3 * math.Sqrt(d))
		for {
			x := normrnd()
			v := 1 + x*c
			if v <= 0.0 {
				continue
			}
			v = v * v * v
			u := unifrnd()
			// Cheap squeeze test first, then the full log acceptance test.
			if u < 1.0-0.0331*(x*x)*(x*x) {
				return m * d * v / b
			}
			if math.Log(u) < 0.5*x*x+d*(1-v+math.Log(v)) {
				return m * d * v / b
			}
		}
	}
	panic("unreachable")
}
// Survival returns the survival function (complementary CDF) at x.
func (g Gamma) Survival(x float64) float64 {
	if x < 0 {
		return 1
	}
	// Regularized upper incomplete gamma function Q(α, βx).
	return mathext.GammaIncRegComp(g.Alpha, g.Beta*x)
}

// StdDev returns the standard deviation of the probability distribution.
func (g Gamma) StdDev() float64 {
	// sqrt(α)/β, the square root of the variance α/β².
	return math.Sqrt(g.Alpha) / g.Beta
}

// Variance returns the variance of the probability distribution.
func (g Gamma) Variance() float64 {
	return g.Alpha / g.Beta / g.Beta
}

24
vendor/gonum.org/v1/gonum/stat/distuv/general.go generated vendored Normal file
View File

@@ -0,0 +1,24 @@
// Copyright ©2014 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
// Parameter represents a parameter of a probability distribution
type Parameter struct {
	// Name is the name of the parameter (e.g. "Rate", "Mu").
	Name string
	// Value is the current value of the parameter.
	Value float64
}

// Panic messages shared by the distributions in this package.
const (
	badPercentile = "distuv: percentile out of bounds"
	badLength     = "distuv: slice length mismatch"
	badSuffStat   = "distuv: wrong suffStat length"
	errNoSamples  = "distuv: must have at least one sample"
)

// Mathematical constants used by the distributions in this package.
const (
	expNegOneHalf   = 0.6065306597126334236037995349911804534419 // https://oeis.org/A092605
	eulerMascheroni = 0.5772156649015328606065120900824024310421 // https://oeis.org/A001620
	apery           = 1.2020569031595942853997381615114499907649 // https://oeis.org/A002117
)

119
vendor/gonum.org/v1/gonum/stat/distuv/gumbel.go generated vendored Normal file
View File

@@ -0,0 +1,119 @@
// Copyright ©2018 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
)
// GumbelRight implements the right-skewed Gumbel distribution, a two-parameter
// continuous distribution with support over the real numbers. The right-skewed
// Gumbel distribution is also sometimes known as the Extreme Value distribution.
//
// The right-skewed Gumbel distribution has density function
//
// 1/beta * exp(-(z + exp(-z)))
// z = (x - mu)/beta
//
// Beta must be greater than 0.
//
// For more information, see https://en.wikipedia.org/wiki/Gumbel_distribution.
type GumbelRight struct {
	// Mu is the location parameter of the distribution.
	Mu float64
	// Beta is the scale parameter; Beta must be greater than 0.
	Beta float64
	// Src is the source of random numbers used by Rand. If Src is nil,
	// Rand uses the global random source.
	Src rand.Source
}
// z returns the standardized coordinate (x - mu)/beta.
func (g GumbelRight) z(x float64) float64 {
	return (x - g.Mu) / g.Beta
}

// CDF computes the value of the cumulative density function at x.
func (g GumbelRight) CDF(x float64) float64 {
	// exp(-exp(-z)), the double-exponential form of the Gumbel CDF.
	return math.Exp(-math.Exp(-g.z(x)))
}

// Entropy returns the differential entropy of the distribution.
func (g GumbelRight) Entropy() float64 {
	return math.Log(g.Beta) + eulerMascheroni + 1
}

// ExKurtosis returns the excess kurtosis of the distribution.
func (g GumbelRight) ExKurtosis() float64 {
	return 12.0 / 5
}

// LogProb computes the natural logarithm of the value of the probability density function at x.
func (g GumbelRight) LogProb(x float64) float64 {
	z := g.z(x)
	return -math.Log(g.Beta) - z - math.Exp(-z)
}

// Mean returns the mean of the probability distribution.
func (g GumbelRight) Mean() float64 {
	return g.Mu + g.Beta*eulerMascheroni
}

// Median returns the median of the Gumbel distribution.
func (g GumbelRight) Median() float64 {
	return g.Mu - g.Beta*math.Log(math.Ln2)
}

// Mode returns the mode of the normal distribution.
func (g GumbelRight) Mode() float64 {
	return g.Mu
}

// NumParameters returns the number of parameters in the distribution.
func (GumbelRight) NumParameters() int {
	return 2
}

// Prob computes the value of the probability density function at x.
func (g GumbelRight) Prob(x float64) float64 {
	return math.Exp(g.LogProb(x))
}

// Quantile returns the inverse of the cumulative probability distribution.
func (g GumbelRight) Quantile(p float64) float64 {
	if p < 0 || 1 < p {
		panic(badPercentile)
	}
	return g.Mu - g.Beta*math.Log(-math.Log(p))
}

// Rand returns a random sample drawn from the distribution.
func (g GumbelRight) Rand() float64 {
	// An exponential variate E gives a Gumbel variate via mu - beta*log(E).
	var e float64
	if g.Src == nil {
		e = rand.ExpFloat64()
	} else {
		e = rand.New(g.Src).ExpFloat64()
	}
	return g.Mu - g.Beta*math.Log(e)
}

// Skewness returns the skewness of the distribution.
func (GumbelRight) Skewness() float64 {
	return 12 * math.Sqrt(6) * apery / (math.Pi * math.Pi * math.Pi)
}

// StdDev returns the standard deviation of the probability distribution.
func (g GumbelRight) StdDev() float64 {
	return (math.Pi / math.Sqrt(6)) * g.Beta
}

// Survival returns the survival function (complementary CDF) at x.
func (g GumbelRight) Survival(x float64) float64 {
	return 1 - g.CDF(x)
}

// Variance returns the variance of the probability distribution.
func (g GumbelRight) Variance() float64 {
	return math.Pi * math.Pi * g.Beta * g.Beta / 6
}

32
vendor/gonum.org/v1/gonum/stat/distuv/interfaces.go generated vendored Normal file
View File

@@ -0,0 +1,32 @@
// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
// LogProber wraps the LogProb method.
type LogProber interface {
	// LogProb returns the natural logarithm of the
	// value of the probability density or probability
	// mass function at x.
	LogProb(x float64) float64
}

// Rander wraps the Rand method.
type Rander interface {
	// Rand returns a random sample drawn from the distribution.
	Rand() float64
}

// RandLogProber is the interface that groups the Rander and LogProber methods.
// It is satisfied by distributions that can both be sampled from and have
// their density evaluated.
type RandLogProber interface {
	Rander
	LogProber
}

// Quantiler wraps the Quantile method.
type Quantiler interface {
	// Quantile returns the minimum value of x from amongst
	// all those values whose CDF value exceeds or equals p.
	Quantile(p float64) float64
}

124
vendor/gonum.org/v1/gonum/stat/distuv/inversegamma.go generated vendored Normal file
View File

@@ -0,0 +1,124 @@
// Copyright ©2018 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/mathext"
)
// InverseGamma implements the inverse gamma distribution, a two-parameter
// continuous distribution with support over the positive real numbers. The
// inverse gamma distribution is the same as the distribution of the reciprocal
// of a gamma distributed random variable.
//
// The inverse gamma distribution has density function
//
// β^α / Γ(α) x^(-α-1)e^(-β/x)
//
// For more information, see https://en.wikipedia.org/wiki/Inverse-gamma_distribution
type InverseGamma struct {
	// Alpha is the shape parameter of the distribution. Alpha must be greater than 0.
	Alpha float64
	// Beta is the scale parameter of the distribution. Beta must be greater than 0.
	Beta float64
	// Src is the source of random numbers used by Rand. If Src is nil,
	// Rand uses the global random source (via the Gamma sampler).
	Src rand.Source
}
// CDF computes the value of the cumulative distribution function at x.
func (g InverseGamma) CDF(x float64) float64 {
	if x < 0 {
		return 0
	}
	// If X ~ Gamma(α, β) then 1/X ~ InverseGamma(α, β), so the CDF is the
	// regularized upper incomplete gamma function Q(α, β/x).
	// TODO(btracey): Replace this with a direct call to the upper regularized
	// gamma function if mathext gets it.
	return mathext.GammaIncRegComp(g.Alpha, g.Beta/x)
}

// ExKurtosis returns the excess kurtosis of the distribution.
func (g InverseGamma) ExKurtosis() float64 {
	if g.Alpha <= 4 {
		// The fourth moment does not exist for α <= 4.
		return math.Inf(1)
	}
	return (30*g.Alpha - 66) / (g.Alpha - 3) / (g.Alpha - 4)
}

// LogProb computes the natural logarithm of the value of the probability
// density function at x.
func (g InverseGamma) LogProb(x float64) float64 {
	if x <= 0 {
		// No support at or below zero.
		return math.Inf(-1)
	}
	a := g.Alpha
	b := g.Beta
	lg, _ := math.Lgamma(a)
	return a*math.Log(b) - lg + (-a-1)*math.Log(x) - b/x
}

// Mean returns the mean of the probability distribution.
func (g InverseGamma) Mean() float64 {
	if g.Alpha <= 1 {
		// The first moment does not exist for α <= 1.
		return math.Inf(1)
	}
	return g.Beta / (g.Alpha - 1)
}

// Mode returns the mode of the distribution.
func (g InverseGamma) Mode() float64 {
	return g.Beta / (g.Alpha + 1)
}

// NumParameters returns the number of parameters in the distribution.
func (InverseGamma) NumParameters() int {
	return 2
}

// Prob computes the value of the probability density function at x.
func (g InverseGamma) Prob(x float64) float64 {
	return math.Exp(g.LogProb(x))
}

// Quantile returns the inverse of the cumulative distribution function.
func (g InverseGamma) Quantile(p float64) float64 {
	if p < 0 || 1 < p {
		panic(badPercentile)
	}
	inv := mathext.GammaIncRegCompInv(g.Alpha, p)
	return (1 / inv) * g.Beta
}

// Rand returns a random sample drawn from the distribution.
//
// Rand panics if either alpha or beta is <= 0.
func (g InverseGamma) Rand() float64 {
	// TODO(btracey): See if there is a more direct way to sample.
	// The reciprocal of a Gamma(α, β) variate is InverseGamma(α, β).
	return 1 / Gamma(g).Rand()
}

// Survival returns the survival function (complementary CDF) at x.
func (g InverseGamma) Survival(x float64) float64 {
	if x < 0 {
		return 1
	}
	return mathext.GammaIncReg(g.Alpha, g.Beta/x)
}

// StdDev returns the standard deviation of the probability distribution.
func (g InverseGamma) StdDev() float64 {
	return math.Sqrt(g.Variance())
}

// Variance returns the variance of the probability distribution.
func (g InverseGamma) Variance() float64 {
	if g.Alpha <= 2 {
		// The second moment does not exist for α <= 2.
		return math.Inf(1)
	}
	v := g.Beta / (g.Alpha - 1)
	return v * v / (g.Alpha - 2)
}

267
vendor/gonum.org/v1/gonum/stat/distuv/laplace.go generated vendored Normal file
View File

@@ -0,0 +1,267 @@
// Copyright ©2014 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"sort"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/stat"
)
// Laplace represents the Laplace distribution (https://en.wikipedia.org/wiki/Laplace_distribution).
type Laplace struct {
	Mu    float64 // Mean of the Laplace distribution
	Scale float64 // Scale of the Laplace distribution
	// Src is the source of random numbers used by Rand. If Src is nil,
	// Rand uses the global random source.
	Src rand.Source
}
// CDF computes the value of the cumulative density function at x.
func (l Laplace) CDF(x float64) float64 {
	z := (x - l.Mu) / l.Scale
	if x < l.Mu {
		// Left tail: exponential rise toward 0.5 at the mean.
		return 0.5 * math.Exp(z)
	}
	return 1 - 0.5*math.Exp(-z)
}

// Entropy returns the entropy of the distribution.
func (l Laplace) Entropy() float64 {
	return 1 + math.Log(2*l.Scale)
}

// ExKurtosis returns the excess kurtosis of the distribution.
func (l Laplace) ExKurtosis() float64 {
	return 3
}
// Fit sets the parameters of the probability distribution from the
// data samples x with relative weights w.
// If weights is nil, then all the weights are 1.
// If weights is not nil, then the len(weights) must equal len(samples).
//
// Note: Laplace distribution has no FitPrior because it has no sufficient
// statistics.
func (l *Laplace) Fit(samples, weights []float64) {
	if weights != nil && len(samples) != len(weights) {
		panic(badLength)
	}
	if len(samples) == 0 {
		panic(errNoSamples)
	}
	if len(samples) == 1 {
		l.Mu = samples[0]
		l.Scale = 0
		return
	}

	var (
		sortedSamples []float64
		sortedWeights []float64
	)
	if sort.Float64sAreSorted(samples) {
		sortedSamples = samples
		sortedWeights = weights
	} else {
		// Copy the inputs so sorting does not mutate the caller's slices.
		// BUG FIX: the previous code declared sortedWeights with := here,
		// shadowing the outer variable, so for unsorted weighted input the
		// median below was silently computed without the weights.
		sortedSamples = make([]float64, len(samples))
		copy(sortedSamples, samples)
		if weights != nil {
			sortedWeights = make([]float64, len(samples))
			copy(sortedWeights, weights)
		}
		stat.SortWeighted(sortedSamples, sortedWeights)
	}

	// The (weighted) median of the samples is the maximum likelihood estimate
	// of the mean parameter.
	// TODO: Rethink quantile type when stat has more options
	l.Mu = stat.Quantile(0.5, stat.Empirical, sortedSamples, sortedWeights)

	// The scale parameter is the (weighted) average absolute distance
	// between the samples and the mean.
	var absError float64
	var sumWeights float64
	if weights != nil {
		for i, v := range samples {
			absError += weights[i] * math.Abs(l.Mu-v)
			sumWeights += weights[i]
		}
		l.Scale = absError / sumWeights
	} else {
		for _, v := range samples {
			absError += math.Abs(l.Mu - v)
		}
		l.Scale = absError / float64(len(samples))
	}
}
// LogProb computes the natural logarithm of the value of the probability density
// function at x.
func (l Laplace) LogProb(x float64) float64 {
return -math.Ln2 - math.Log(l.Scale) - math.Abs(x-l.Mu)/l.Scale
}
// parameters returns the parameters of the distribution.
func (l Laplace) parameters(p []Parameter) []Parameter {
nParam := l.NumParameters()
if p == nil {
p = make([]Parameter, nParam)
} else if len(p) != nParam {
panic(badLength)
}
p[0].Name = "Mu"
p[0].Value = l.Mu
p[1].Name = "Scale"
p[1].Value = l.Scale
return p
}
// Mean returns the mean of the probability distribution.
func (l Laplace) Mean() float64 {
return l.Mu
}
// Median returns the median of the LaPlace distribution.
func (l Laplace) Median() float64 {
return l.Mu
}
// Mode returns the mode of the LaPlace distribution.
func (l Laplace) Mode() float64 {
return l.Mu
}
// NumParameters returns the number of parameters in the distribution.
func (l Laplace) NumParameters() int {
return 2
}
// Quantile returns the inverse of the cumulative probability distribution.
func (l Laplace) Quantile(p float64) float64 {
if p < 0 || p > 1 {
panic(badPercentile)
}
if p < 0.5 {
return l.Mu + l.Scale*math.Log(1+2*(p-0.5))
}
return l.Mu - l.Scale*math.Log(1-2*(p-0.5))
}
// Prob computes the value of the probability density function at x.
func (l Laplace) Prob(x float64) float64 {
return math.Exp(l.LogProb(x))
}
// Rand returns a random sample drawn from the distribution.
func (l Laplace) Rand() float64 {
var rnd float64
if l.Src == nil {
rnd = rand.Float64()
} else {
rnd = rand.New(l.Src).Float64()
}
u := rnd - 0.5
if u < 0 {
return l.Mu + l.Scale*math.Log(1+2*u)
}
return l.Mu - l.Scale*math.Log(1-2*u)
}
// Score returns the score function with respect to the parameters of the
// distribution at the input location x. The score function is the derivative
// of the log-likelihood at x with respect to the parameters
//
//	(∂/∂θ) log(p(x;θ))
//
// If deriv is non-nil, len(deriv) must equal the number of parameters otherwise
// Score will panic, and the derivative is stored in-place into deriv. If deriv
// is nil a new slice will be allocated and returned.
//
// The order is [∂LogProb / ∂Mu, ∂LogProb / ∂Scale].
//
// For more information, see https://en.wikipedia.org/wiki/Score_%28statistics%29.
//
// Special cases:
//
//	Score(l.Mu) = [NaN, -1/l.Scale]
func (l Laplace) Score(deriv []float64, x float64) []float64 {
	if deriv == nil {
		deriv = make([]float64, l.NumParameters())
	}
	if len(deriv) != l.NumParameters() {
		panic(badLength)
	}
	d := x - l.Mu
	switch {
	case d > 0:
		deriv[0] = 1 / l.Scale
	case d < 0:
		deriv[0] = -1 / l.Scale
	default:
		// |x-Mu| is not differentiable at x == Mu.
		deriv[0] = math.NaN()
	}
	deriv[1] = math.Abs(d)/(l.Scale*l.Scale) - 1/l.Scale
	return deriv
}
// ScoreInput returns the score function with respect to the input of the
// distribution at the input location specified by x. The score function is the
// derivative of the log-likelihood
//
//	(d/dx) log(p(x)) .
//
// Special cases:
//
//	ScoreInput(l.Mu) = NaN
func (l Laplace) ScoreInput(x float64) float64 {
	switch d := x - l.Mu; {
	case d > 0:
		return -1 / l.Scale
	case d < 0:
		return 1 / l.Scale
	}
	// |x-Mu| is not differentiable at x == Mu.
	return math.NaN()
}
// Skewness returns the skewness of the distribution.
// The Laplace distribution is symmetric, so the skewness is always 0.
func (Laplace) Skewness() float64 {
	return 0
}
// StdDev returns the standard deviation of the distribution, √2·Scale.
func (l Laplace) StdDev() float64 {
	return math.Sqrt2 * l.Scale
}
// Survival returns the survival function (complementary CDF) at x.
func (l Laplace) Survival(x float64) float64 {
	if x >= l.Mu {
		// Right tail decays as 0.5*exp(-(x-Mu)/Scale).
		return 0.5 * math.Exp(-(x-l.Mu)/l.Scale)
	}
	return 1 - 0.5*math.Exp((x-l.Mu)/l.Scale)
}
// setParameters modifies the parameters of the distribution. The slice must
// contain exactly the parameters named "Mu" and "Scale", in that order.
func (l *Laplace) setParameters(p []Parameter) {
	if len(p) != l.NumParameters() {
		panic(badLength)
	}
	for i, want := range [...]string{"Mu", "Scale"} {
		if p[i].Name != want {
			panic("laplace: " + panicNameMismatch)
		}
	}
	l.Mu = p[0].Value
	l.Scale = p[1].Value
}
// Variance returns the variance of the probability distribution, 2·Scale².
func (l Laplace) Variance() float64 {
	return 2 * l.Scale * l.Scale
}

98
vendor/gonum.org/v1/gonum/stat/distuv/logistic.go generated vendored Normal file
View File

@@ -0,0 +1,98 @@
// Copyright ©2021 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
)
// Logistic implements the Logistic distribution, a two-parameter distribution with support on the real axis.
// Its cumulative distribution function is the logistic function.
//
// General form of probability density function for Logistic distribution is
//
//	E(x) / (s * (1 + E(x))^2)
//	where E(x) = exp(-(x-μ)/s)
//
// For more information, see https://en.wikipedia.org/wiki/Logistic_distribution.
type Logistic struct {
	Mu float64 // Mean value (location parameter μ)
	S  float64 // Scale parameter proportional to standard deviation
}
// CDF computes the value of the cumulative density function at x.
// It is the logistic (sigmoid) function of the standardized input.
func (l Logistic) CDF(x float64) float64 {
	z := (x - l.Mu) / l.S
	return 1 / (1 + math.Exp(-z))
}
// ExKurtosis returns the excess kurtosis of the distribution.
// It is the constant 6/5, independent of the parameters.
func (l Logistic) ExKurtosis() float64 {
	return 6.0 / 5.0
}
// LogProb computes the natural logarithm of the value of the probability
// density function at x.
//
// Bug fix: the previous implementation returned x - 2*log(exp(x)+1), which
// ignores Mu and S entirely and therefore disagreed with Prob (which uses
// both parameters) whenever Mu != 0 or S != 1. This version satisfies
// LogProb(x) == log(Prob(x)) for all parameter values.
func (l Logistic) LogProb(x float64) float64 {
	// The density is symmetric about Mu, so the standardized distance may be
	// taken as |z|; this keeps exp(-|z|) <= 1 and avoids overflow for large
	// |x-Mu|/S.
	z := math.Abs(x-l.Mu) / l.S
	// log pdf = -z - 2*log(1+exp(-z)) - log(S).
	return -z - 2*math.Log1p(math.Exp(-z)) - math.Log(l.S)
}
// Mean returns the mean of the probability distribution, which is Mu.
func (l Logistic) Mean() float64 {
	return l.Mu
}
// Mode returns the mode of the distribution.
//
// It is same as Mean for Logistic distribution.
func (l Logistic) Mode() float64 {
	return l.Mu
}
// Median returns the median of the distribution.
//
// It is same as Mean for Logistic distribution.
func (l Logistic) Median() float64 {
	return l.Mu
}
// NumParameters returns the number of parameters in the distribution.
//
// Always returns 2 (Mu and S).
func (l Logistic) NumParameters() int {
	return 2
}
// Prob computes the value of the probability density function at x,
// E(x) / (S * (1 + E(x))^2) with E(x) = exp(-(x-Mu)/S).
func (l Logistic) Prob(x float64) float64 {
	e := math.Exp(-(x - l.Mu) / l.S)
	denom := l.S * math.Pow(1+e, 2)
	return e / denom
}
// Quantile returns the inverse of the cumulative distribution function
// (the logit function scaled by S and shifted by Mu).
//
// Quantile panics if p is outside [0, 1], matching the behavior of the other
// Quantile implementations in this package (previously out-of-range input
// silently produced a meaningless result).
func (l Logistic) Quantile(p float64) float64 {
	if p < 0 || p > 1 {
		panic(badPercentile)
	}
	return l.Mu + l.S*math.Log(p/(1-p))
}
// Skewness returns the skewness of the distribution.
//
// Always 0 for Logistic distribution (the density is symmetric about Mu).
func (l Logistic) Skewness() float64 {
	return 0
}
// StdDev returns the standard deviation of the probability distribution,
// S·π/√3.
func (l Logistic) StdDev() float64 {
	return l.S * math.Pi / sqrt3
}
// Survival returns the survival function (complementary CDF) at x,
// computed as 1 - CDF(x).
func (l Logistic) Survival(x float64) float64 {
	return 1 - l.CDF(x)
}
// Variance returns the variance of the probability distribution, S²·π²/3.
func (l Logistic) Variance() float64 {
	return l.S * l.S * math.Pi * math.Pi / 3
}

114
vendor/gonum.org/v1/gonum/stat/distuv/lognormal.go generated vendored Normal file
View File

@@ -0,0 +1,114 @@
// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
)
// LogNormal represents a random variable whose log is normally distributed.
// The probability density function is given by
//
//	1/(x σ √(2π)) exp(-(ln(x)-μ)^2/(2σ^2))
type LogNormal struct {
	Mu    float64     // Mean of the underlying normal distribution
	Sigma float64     // Standard deviation of the underlying normal distribution
	Src   rand.Source // Source of random numbers; nil uses the global source
}
// CDF computes the value of the cumulative density function at x,
// using the normal CDF of ln(x) expressed via erfc.
func (l LogNormal) CDF(x float64) float64 {
	return 0.5 * math.Erfc(-(math.Log(x)-l.Mu)/(math.Sqrt2*l.Sigma))
}
// Entropy returns the differential entropy of the distribution,
// 0.5 + 0.5·ln(2πσ²) + μ.
func (l LogNormal) Entropy() float64 {
	return 0.5 + 0.5*math.Log(2*math.Pi*l.Sigma*l.Sigma) + l.Mu
}
// ExKurtosis returns the excess kurtosis of the distribution,
// e^(4σ²) + 2e^(3σ²) + 3e^(2σ²) - 6.
func (l LogNormal) ExKurtosis() float64 {
	s2 := l.Sigma * l.Sigma
	return math.Exp(4*s2) + 2*math.Exp(3*s2) + 3*math.Exp(2*s2) - 6
}
// LogProb computes the natural logarithm of the value of the probability
// density function at x. The density is zero for x < 0, so -Inf is returned.
func (l LogNormal) LogProb(x float64) float64 {
	if x < 0 {
		return math.Inf(-1)
	}
	lx := math.Log(x)
	z := (lx - l.Mu) / l.Sigma
	return -0.5*z*z - lx - math.Log(l.Sigma) - logRoot2Pi
}
// Mean returns the mean of the probability distribution, e^(μ + σ²/2).
func (l LogNormal) Mean() float64 {
	return math.Exp(l.Mu + 0.5*l.Sigma*l.Sigma)
}
// Median returns the median of the probability distribution, e^μ.
func (l LogNormal) Median() float64 {
	return math.Exp(l.Mu)
}
// Mode returns the mode of the probability distribution, e^(μ - σ²).
func (l LogNormal) Mode() float64 {
	return math.Exp(l.Mu - l.Sigma*l.Sigma)
}
// NumParameters returns the number of parameters in the distribution.
// Always returns 2 (Mu and Sigma).
func (LogNormal) NumParameters() int {
	return 2
}
// Prob computes the value of the probability density function at x.
// It is defined as exp(LogProb(x)).
func (l LogNormal) Prob(x float64) float64 {
	return math.Exp(l.LogProb(x))
}
// Quantile returns the inverse of the cumulative probability distribution.
// Quantile panics if p is outside [0, 1].
func (l LogNormal) Quantile(p float64) float64 {
	if p < 0 || p > 1 {
		panic(badPercentile)
	}
	// Formula from http://www.math.uah.edu/stat/special/LogNormal.html.
	return math.Exp(l.Mu + l.Sigma*UnitNormal.Quantile(p))
}
// Rand returns a random sample drawn from the distribution by exponentiating
// a normal variate. A nil Src falls back to the global random source.
func (l LogNormal) Rand() float64 {
	var z float64
	if l.Src != nil {
		z = rand.New(l.Src).NormFloat64()
	} else {
		z = rand.NormFloat64()
	}
	return math.Exp(z*l.Sigma + l.Mu)
}
// Skewness returns the skewness of the distribution,
// (e^(σ²) + 2)·√(e^(σ²) - 1).
func (l LogNormal) Skewness() float64 {
	s2 := l.Sigma * l.Sigma
	return (math.Exp(s2) + 2) * math.Sqrt(math.Exp(s2)-1)
}
// StdDev returns the standard deviation of the probability distribution,
// the square root of Variance.
func (l LogNormal) StdDev() float64 {
	return math.Sqrt(l.Variance())
}
// Survival returns the survival function (complementary CDF) at x.
func (l LogNormal) Survival(x float64) float64 {
	z := (math.Log(x) - l.Mu) / (math.Sqrt2 * l.Sigma)
	return 0.5 * (1 - math.Erf(z))
}
// Variance returns the variance of the probability distribution,
// (e^(σ²) - 1)·e^(2μ + σ²).
func (l LogNormal) Variance() float64 {
	s2 := l.Sigma * l.Sigma
	return (math.Exp(s2) - 1) * math.Exp(2*l.Mu+s2)
}

264
vendor/gonum.org/v1/gonum/stat/distuv/norm.go generated vendored Normal file
View File

@@ -0,0 +1,264 @@
// Copyright ©2014 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/floats"
"gonum.org/v1/gonum/mathext"
"gonum.org/v1/gonum/stat"
)
// UnitNormal is an instantiation of the normal distribution with Mu = 0 and Sigma = 1
// (the standard normal distribution).
var UnitNormal = Normal{Mu: 0, Sigma: 1}
// Normal represents a normal (Gaussian) distribution (https://en.wikipedia.org/wiki/Normal_distribution).
type Normal struct {
	Mu    float64     // Mean of the normal distribution
	Sigma float64     // Standard deviation of the normal distribution
	Src   rand.Source // Source of random numbers; nil uses the global source
	// The fields need to be Mu and Sigma and not Mean and StdDev because
	// Normal has methods named Mean and StdDev.
}
// CDF computes the value of the cumulative density function at x,
// expressed via the complementary error function erfc.
func (n Normal) CDF(x float64) float64 {
	return 0.5 * math.Erfc(-(x-n.Mu)/(n.Sigma*math.Sqrt2))
}
// ConjugateUpdate updates the parameters of the distribution from the sufficient
// statistics of a set of samples. The sufficient statistics, suffStat, have been
// observed with nSamples observations. The prior values of the distribution are those
// currently in the distribution, and have been observed with priorStrength samples.
//
// For the normal distribution, the sufficient statistics are the mean and
// uncorrected standard deviation of the samples.
// The prior is having seen strength[0] samples with mean Normal.Mu
// and strength[1] samples with standard deviation Normal.Sigma. As a result of
// this function, Normal.Mu and Normal.Sigma are updated based on the weighted
// samples, and strength is modified to include the new number of samples observed.
//
// This function panics if len(suffStat) != n.NumSuffStat() or
// len(priorStrength) != n.NumSuffStat().
func (n *Normal) ConjugateUpdate(suffStat []float64, nSamples float64, priorStrength []float64) {
	// TODO: Support prior strength with math.Inf(1) to allow updating with
	// a known mean/standard deviation
	if len(suffStat) != n.NumSuffStat() {
		panic("norm: incorrect suffStat length")
	}
	if len(priorStrength) != n.NumSuffStat() {
		panic("norm: incorrect priorStrength length")
	}
	// Effective counts for the mean and variance updates respectively.
	totalMeanSamples := nSamples + priorStrength[0]
	totalSum := suffStat[0]*nSamples + n.Mu*priorStrength[0]
	totalVarianceSamples := nSamples + priorStrength[1]
	// sample variance (suffStat[1] is the uncorrected standard deviation)
	totalVariance := nSamples * suffStat[1] * suffStat[1]
	// add prior variance
	totalVariance += priorStrength[1] * n.Sigma * n.Sigma
	// add cross variance from the difference of the means
	meanDiff := (suffStat[0] - n.Mu)
	totalVariance += priorStrength[0] * nSamples * meanDiff * meanDiff / totalMeanSamples
	n.Mu = totalSum / totalMeanSamples
	n.Sigma = math.Sqrt(totalVariance / totalVarianceSamples)
	// Record that nSamples more observations have been folded into the prior.
	floats.AddConst(nSamples, priorStrength)
}
// Entropy returns the differential entropy of the distribution,
// 0.5·(ln(2π) + 1 + 2·ln(σ)).
func (n Normal) Entropy() float64 {
	return 0.5 * (log2Pi + 1 + 2*math.Log(n.Sigma))
}
// ExKurtosis returns the excess kurtosis of the distribution.
// It is always 0 for the normal distribution.
func (Normal) ExKurtosis() float64 {
	return 0
}
// Fit sets the parameters of the probability distribution from the
// data samples x with relative weights w. If weights is nil, then all the weights
// are 1. If weights is not nil, then the len(weights) must equal len(samples).
func (n *Normal) Fit(samples, weights []float64) {
	ss := make([]float64, n.NumSuffStat())
	count := n.SuffStat(ss, samples, weights)
	// A zero prior strength means the fit depends only on the samples.
	n.ConjugateUpdate(ss, count, make([]float64, n.NumSuffStat()))
}
// LogProb computes the natural logarithm of the value of the probability density function at x.
func (n Normal) LogProb(x float64) float64 {
	d := x - n.Mu
	return negLogRoot2Pi - math.Log(n.Sigma) - d*d/(2*n.Sigma*n.Sigma)
}
// Mean returns the mean of the probability distribution, which is Mu.
func (n Normal) Mean() float64 {
	return n.Mu
}
// Median returns the median of the normal distribution.
// The distribution is symmetric, so the median equals Mu.
func (n Normal) Median() float64 {
	return n.Mu
}
// Mode returns the mode of the normal distribution.
// The density peaks at Mu.
func (n Normal) Mode() float64 {
	return n.Mu
}
// NumParameters returns the number of parameters in the distribution.
// Always returns 2 (Mu and Sigma).
func (Normal) NumParameters() int {
	return 2
}
// NumSuffStat returns the number of sufficient statistics for the distribution.
// Always returns 2 (sample mean and uncorrected standard deviation).
func (Normal) NumSuffStat() int {
	return 2
}
// Prob computes the value of the probability density function at x.
// It is defined as exp(LogProb(x)).
func (n Normal) Prob(x float64) float64 {
	return math.Exp(n.LogProb(x))
}
// Quantile returns the inverse of the cumulative probability distribution.
// Quantile panics if p is outside [0, 1].
func (n Normal) Quantile(p float64) float64 {
	if p < 0 || p > 1 {
		panic(badPercentile)
	}
	// Transform the standard normal quantile by the location and scale.
	return n.Mu + n.Sigma*mathext.NormalQuantile(p)
}
// Rand returns a random sample drawn from the distribution by rescaling a
// standard normal variate. A nil Src falls back to the global random source.
func (n Normal) Rand() float64 {
	var z float64
	if n.Src != nil {
		z = rand.New(n.Src).NormFloat64()
	} else {
		z = rand.NormFloat64()
	}
	return z*n.Sigma + n.Mu
}
// Score returns the score function with respect to the parameters of the
// distribution at the input location x. The score function is the derivative
// of the log-likelihood at x with respect to the parameters
//
//	(∂/∂θ) log(p(x;θ))
//
// If deriv is non-nil, len(deriv) must equal the number of parameters otherwise
// Score will panic, and the derivative is stored in-place into deriv. If deriv
// is nil a new slice will be allocated and returned.
//
// The order is [∂LogProb / ∂Mu, ∂LogProb / ∂Sigma].
//
// For more information, see https://en.wikipedia.org/wiki/Score_%28statistics%29.
func (n Normal) Score(deriv []float64, x float64) []float64 {
	if deriv == nil {
		deriv = make([]float64, n.NumParameters())
	}
	if len(deriv) != n.NumParameters() {
		panic(badLength)
	}
	z := (x - n.Mu) / n.Sigma
	deriv[0] = (x - n.Mu) / (n.Sigma * n.Sigma)
	deriv[1] = 1 / n.Sigma * (-1 + z*z)
	return deriv
}
// ScoreInput returns the score function with respect to the input of the
// distribution at the input location specified by x. The score function is the
// derivative of the log-likelihood
//
//	(d/dx) log(p(x)) .
func (n Normal) ScoreInput(x float64) float64 {
	d := x - n.Mu
	return -(1 / (2 * n.Sigma * n.Sigma)) * 2 * d
}
// Skewness returns the skewness of the distribution.
// It is always 0 for the normal distribution.
func (Normal) Skewness() float64 {
	return 0
}
// StdDev returns the standard deviation of the probability distribution, Sigma.
func (n Normal) StdDev() float64 {
	return n.Sigma
}
// SuffStat computes the sufficient statistics of a set of samples to update
// the distribution. The sufficient statistics are stored in place, and the
// effective number of samples are returned.
//
// The normal distribution has two sufficient statistics, the mean of the samples
// and the uncorrected standard deviation of the samples.
//
// If weights is nil, the weights are assumed to be 1, otherwise panics if
// len(samples) != len(weights). Panics if len(suffStat) != NumSuffStat().
func (Normal) SuffStat(suffStat, samples, weights []float64) (nSamples float64) {
	if len(weights) != 0 && len(samples) != len(weights) {
		panic(badLength)
	}
	if len(suffStat) != (Normal{}).NumSuffStat() {
		panic(badSuffStat)
	}
	nSamples = float64(len(samples))
	if len(weights) != 0 {
		nSamples = floats.Sum(weights)
	}
	mean := stat.Mean(samples, weights)
	suffStat[0] = mean
	// Use Moment and not StdDev because we want the uncorrected estimate.
	variance := stat.MomentAbout(2, samples, mean, weights)
	suffStat[1] = math.Sqrt(variance)
	return nSamples
}
// Survival returns the survival function (complementary CDF) at x,
// expressed via the error function erf.
func (n Normal) Survival(x float64) float64 {
	return 0.5 * (1 - math.Erf((x-n.Mu)/(n.Sigma*math.Sqrt2)))
}
// setParameters modifies the parameters of the distribution. The slice must
// contain exactly the parameters named "Mu" and "Sigma", in that order.
func (n *Normal) setParameters(p []Parameter) {
	if len(p) != n.NumParameters() {
		panic("normal: incorrect number of parameters to set")
	}
	for i, want := range [...]string{"Mu", "Sigma"} {
		if p[i].Name != want {
			panic("normal: " + panicNameMismatch)
		}
	}
	n.Mu = p[0].Value
	n.Sigma = p[1].Value
}
// Variance returns the variance of the probability distribution, Sigma².
func (n Normal) Variance() float64 {
	return n.Sigma * n.Sigma
}
// parameters returns the parameters of the distribution in the order
// [Mu, Sigma]. If p is nil a new slice is allocated; otherwise len(p) must
// equal NumParameters or parameters panics.
func (n Normal) parameters(p []Parameter) []Parameter {
	np := n.NumParameters()
	switch {
	case p == nil:
		p = make([]Parameter, np)
	case len(p) != np:
		panic("normal: improper parameter length")
	}
	p[0].Name = "Mu"
	p[0].Value = n.Mu
	p[1].Name = "Sigma"
	p[1].Value = n.Sigma
	return p
}

131
vendor/gonum.org/v1/gonum/stat/distuv/pareto.go generated vendored Normal file
View File

@@ -0,0 +1,131 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
)
// Pareto implements the Pareto (Type I) distribution, a two-parameter
// distribution with support above the scale parameter.
//
// The density function is given by
//
//	(α x_m^{α})/(x^{α+1}) for x >= x_m.
//
// For more information, see https://en.wikipedia.org/wiki/Pareto_distribution.
type Pareto struct {
	// Xm is the scale parameter.
	// Xm must be greater than 0.
	Xm float64
	// Alpha is the shape parameter.
	// Alpha must be greater than 0.
	Alpha float64
	// Src is the source of random numbers; nil uses the global source.
	Src rand.Source
}
// CDF computes the value of the cumulative density function at x.
// It is zero below the scale parameter Xm.
func (p Pareto) CDF(x float64) float64 {
	if x >= p.Xm {
		// 1 - (Xm/x)^Alpha, computed via Expm1 for accuracy near zero.
		return -math.Expm1(p.Alpha * math.Log(p.Xm/x))
	}
	return 0
}
// Entropy returns the differential entropy of the distribution,
// ln(Xm/α) + 1 + 1/α.
func (p Pareto) Entropy() float64 {
	return math.Log(p.Xm) - math.Log(p.Alpha) + (1 + 1/p.Alpha)
}
// ExKurtosis returns the excess kurtosis of the distribution.
// It is undefined for Alpha <= 4, for which NaN is returned.
func (p Pareto) ExKurtosis() float64 {
	if p.Alpha <= 4 {
		return math.NaN()
	}
	return 6 * (p.Alpha*p.Alpha*p.Alpha + p.Alpha*p.Alpha - 6*p.Alpha - 2) / (p.Alpha * (p.Alpha - 3) * (p.Alpha - 4))
}
// LogProb computes the natural logarithm of the value of the probability
// density function at x. The density is zero below Xm, so -Inf is returned.
func (p Pareto) LogProb(x float64) float64 {
	if x < p.Xm {
		return math.Inf(-1)
	}
	logx := math.Log(x)
	return math.Log(p.Alpha) + p.Alpha*math.Log(p.Xm) - (p.Alpha+1)*logx
}
// Mean returns the mean of the probability distribution.
// The mean is infinite for Alpha <= 1.
func (p Pareto) Mean() float64 {
	if p.Alpha > 1 {
		return p.Alpha * p.Xm / (p.Alpha - 1)
	}
	return math.Inf(1)
}
// Median returns the median of the pareto distribution, Quantile(0.5).
func (p Pareto) Median() float64 {
	return p.Quantile(0.5)
}
// Mode returns the mode of the distribution.
// The density is maximal at the lower support bound Xm.
func (p Pareto) Mode() float64 {
	return p.Xm
}
// NumParameters returns the number of parameters in the distribution.
// Always returns 2 (Xm and Alpha).
func (p Pareto) NumParameters() int {
	return 2
}
// Prob computes the value of the probability density function at x.
// It is defined as exp(LogProb(x)).
func (p Pareto) Prob(x float64) float64 {
	return math.Exp(p.LogProb(x))
}
// Quantile returns the inverse of the cumulative probability distribution.
// Quantile panics if prob is outside [0, 1].
func (p Pareto) Quantile(prob float64) float64 {
	if prob < 0 || 1 < prob {
		panic(badPercentile)
	}
	return p.Xm / math.Pow(1-prob, 1/p.Alpha)
}
// Rand returns a random sample drawn from the distribution by transforming an
// exponential variate. A nil Src falls back to the global random source.
func (p Pareto) Rand() float64 {
	var e float64
	if p.Src != nil {
		e = rand.New(p.Src).ExpFloat64()
	} else {
		e = rand.ExpFloat64()
	}
	return p.Xm * math.Exp(e/p.Alpha)
}
// StdDev returns the standard deviation of the probability distribution,
// the square root of Variance.
func (p Pareto) StdDev() float64 {
	return math.Sqrt(p.Variance())
}
// Survival returns the survival function (complementary CDF) at x,
// (Xm/x)^Alpha for x >= Xm and 1 otherwise.
func (p Pareto) Survival(x float64) float64 {
	if x >= p.Xm {
		return math.Pow(p.Xm/x, p.Alpha)
	}
	return 1
}
// Variance returns the variance of the probability distribution.
// The variance is infinite for Alpha <= 2.
func (p Pareto) Variance() float64 {
	if p.Alpha <= 2 {
		return math.Inf(1)
	}
	am1 := p.Alpha - 1
	return p.Xm * p.Xm * p.Alpha / (am1 * am1 * (p.Alpha - 2))
}

145
vendor/gonum.org/v1/gonum/stat/distuv/poisson.go generated vendored Normal file
View File

@@ -0,0 +1,145 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/mathext"
)
// Poisson implements the Poisson distribution, a discrete probability distribution
// that expresses the probability of a given number of events occurring in a fixed
// interval.
// The poisson distribution has density function:
//
//	f(k) = λ^k / k! e^(-λ)
//
// For more information, see https://en.wikipedia.org/wiki/Poisson_distribution.
type Poisson struct {
	// Lambda is the average number of events in an interval.
	// Lambda must be greater than 0.
	Lambda float64
	// Src is the source of random numbers; nil uses the global source.
	Src rand.Source
}
// CDF computes the value of the cumulative distribution function at x,
// using the regularized upper incomplete gamma function. It is zero for x < 0.
func (p Poisson) CDF(x float64) float64 {
	if x < 0 {
		return 0
	}
	return mathext.GammaIncRegComp(math.Floor(x+1), p.Lambda)
}
// ExKurtosis returns the excess kurtosis of the distribution, 1/λ.
func (p Poisson) ExKurtosis() float64 {
	return 1 / p.Lambda
}
// LogProb computes the natural logarithm of the value of the probability
// density function at x. The mass is zero (so -Inf is returned) for negative
// or non-integer x.
func (p Poisson) LogProb(x float64) float64 {
	if x < 0 || math.Floor(x) != x {
		return math.Inf(-1)
	}
	// ln f(x) = x·ln(λ) - λ - ln(x!) with ln(x!) from Lgamma(x+1).
	lg, _ := math.Lgamma(math.Floor(x) + 1)
	return x*math.Log(p.Lambda) - p.Lambda - lg
}
// Mean returns the mean of the probability distribution, λ.
func (p Poisson) Mean() float64 {
	return p.Lambda
}
// NumParameters returns the number of parameters in the distribution.
// Always returns 1 (Lambda).
func (Poisson) NumParameters() int {
	return 1
}
// Prob computes the value of the probability density function at x.
// It is defined as exp(LogProb(x)).
func (p Poisson) Prob(x float64) float64 {
	return math.Exp(p.LogProb(x))
}
// Rand returns a random sample drawn from the distribution.
//
// Two algorithms are used depending on Lambda: a direct summation of
// exponential variates for small Lambda, and the PTRS transformed-rejection
// method for large Lambda.
func (p Poisson) Rand() float64 {
	// NUMERICAL RECIPES IN C: THE ART OF SCIENTIFIC COMPUTING (ISBN 0-521-43108-5)
	// p. 294
	// <http://www.aip.de/groups/soe/local/numres/bookcpdf/c7-3.pdf>
	rnd := rand.ExpFloat64
	var rng *rand.Rand
	if p.Src != nil {
		rng = rand.New(p.Src)
		rnd = rng.ExpFloat64
	}
	if p.Lambda < 10.0 {
		// Use direct method: count exponential inter-arrival times until
		// their sum exceeds Lambda.
		var em float64
		t := 0.0
		for {
			t += rnd()
			if t >= p.Lambda {
				break
			}
			em++
		}
		return em
	}
	// Generate using:
	//  W. Hörmann. "The transformed rejection method for generating Poisson
	//  random variables." Insurance: Mathematics and Economics
	//  12.1 (1993): 39-45.
	// Algorithm PTRS
	rnd = rand.Float64
	if rng != nil {
		rnd = rng.Float64
	}
	// Constants of the PTRS hat function, as given in the paper.
	b := 0.931 + 2.53*math.Sqrt(p.Lambda)
	a := -0.059 + 0.02483*b
	invalpha := 1.1239 + 1.1328/(b-3.4)
	vr := 0.9277 - 3.6224/(b-2)
	for {
		U := rnd() - 0.5
		V := rnd()
		us := 0.5 - math.Abs(U)
		k := math.Floor((2*a/us+b)*U + p.Lambda + 0.43)
		if us >= 0.07 && V <= vr {
			// Fast acceptance region: no further evaluation needed.
			return k
		}
		if k <= 0 || (us < 0.013 && V > us) {
			// Quick rejection.
			continue
		}
		// Full acceptance test against the log of the Poisson mass.
		lg, _ := math.Lgamma(k + 1)
		if math.Log(V*invalpha/(a/(us*us)+b)) <= k*math.Log(p.Lambda)-p.Lambda-lg {
			return k
		}
	}
}
// Skewness returns the skewness of the distribution, 1/√λ.
func (p Poisson) Skewness() float64 {
	return 1 / math.Sqrt(p.Lambda)
}
// StdDev returns the standard deviation of the probability distribution,
// the square root of Variance.
func (p Poisson) StdDev() float64 {
	return math.Sqrt(p.Variance())
}
// Survival returns the survival function (complementary CDF) at x,
// computed as 1 - CDF(x).
func (p Poisson) Survival(x float64) float64 {
	return 1 - p.CDF(x)
}
// Variance returns the variance of the probability distribution, λ.
func (p Poisson) Variance() float64 {
	return p.Lambda
}

142
vendor/gonum.org/v1/gonum/stat/distuv/statdist.go generated vendored Normal file
View File

@@ -0,0 +1,142 @@
// Copyright ©2018 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"gonum.org/v1/gonum/mathext"
)
// Bhattacharyya is a type for computing the Bhattacharyya distance between
// probability distributions. The zero value is ready to use.
//
// The Bhattacharyya distance is defined as
//
//	D_B = -ln(BC(l,r))
//	BC = \int_-∞^∞ (p(x)q(x))^(1/2) dx
//
// Where BC is known as the Bhattacharyya coefficient.
// The Bhattacharyya distance is related to the Hellinger distance by
//
//	H(l,r) = sqrt(1-BC(l,r))
//
// For more information, see
//
//	https://en.wikipedia.org/wiki/Bhattacharyya_distance
type Bhattacharyya struct{}
// DistBeta returns the Bhattacharyya distance between Beta distributions l and r.
// For Beta distributions, the Bhattacharyya distance is given by
//
//	-ln(B((α_l + α_r)/2, (β_l + β_r)/2) / (B(α_l,β_l), B(α_r,β_r)))
//
// Where B is the Beta function. The computation is done entirely in log
// space via Lbeta.
func (Bhattacharyya) DistBeta(l, r Beta) float64 {
	// Reference: https://en.wikipedia.org/wiki/Hellinger_distance#Examples
	return -mathext.Lbeta((l.Alpha+r.Alpha)/2, (l.Beta+r.Beta)/2) +
		0.5*mathext.Lbeta(l.Alpha, l.Beta) + 0.5*mathext.Lbeta(r.Alpha, r.Beta)
}
// DistNormal returns the Bhattacharyya distance Normal distributions l and r.
// For Normal distributions, the Bhattacharyya distance is given by
//
//	s = (σ_l^2 + σ_r^2)/2
//	BC = 1/8 (μ_l-μ_r)^2/s + 1/2 ln(s/(σ_l*σ_r))
func (Bhattacharyya) DistNormal(l, r Normal) float64 {
	// Reference: https://en.wikipedia.org/wiki/Bhattacharyya_distance
	m := l.Mu - r.Mu
	s := (l.Sigma*l.Sigma + r.Sigma*r.Sigma) / 2
	// ln(s/(σ_l σ_r))/2 is expanded into separate logs.
	return 0.125*m*m/s + 0.5*math.Log(s) - 0.5*math.Log(l.Sigma) - 0.5*math.Log(r.Sigma)
}
// Hellinger is a type for computing the Hellinger distance between probability
// distributions. The zero value is ready to use.
//
// The Hellinger distance is defined as
//
//	H^2(l,r) = 1/2 * int_x (\sqrt(l(x)) - \sqrt(r(x)))^2 dx
//
// and is bounded between 0 and 1. Note the above formula defines the squared
// Hellinger distance, while this returns the Hellinger distance itself.
// The Hellinger distance is related to the Bhattacharyya distance by
//
//	H^2 = 1 - exp(-D_B)
//
// For more information, see
//
//	https://en.wikipedia.org/wiki/Hellinger_distance
type Hellinger struct{}
// DistBeta computes the Hellinger distance between Beta distributions l and r.
// See the documentation of Bhattacharyya.DistBeta for the distance formula.
// The result is sqrt(1 - exp(-D_B)), computed with Expm1 for accuracy.
func (Hellinger) DistBeta(l, r Beta) float64 {
	return math.Sqrt(-math.Expm1(-Bhattacharyya{}.DistBeta(l, r)))
}
// DistNormal computes the Hellinger distance between Normal distributions l and r.
// See the documentation of Bhattacharyya.DistNormal for the distance formula.
// The result is sqrt(1 - exp(-D_B)), computed with Expm1 for accuracy.
func (Hellinger) DistNormal(l, r Normal) float64 {
	return math.Sqrt(-math.Expm1(-Bhattacharyya{}.DistNormal(l, r)))
}
// KullbackLeibler is a type for computing the Kullback-Leibler divergence from l to r.
// The zero value is ready to use.
//
// The Kullback-Leibler divergence is defined as
//
//	D_KL(l || r ) = \int_x p(x) log(p(x)/q(x)) dx
//
// Note that the Kullback-Leibler divergence is not symmetric with respect to
// the order of the input arguments.
type KullbackLeibler struct{}
// DistBeta returns the Kullback-Leibler divergence between Beta distributions
// l and r.
//
// For two Beta distributions, the KL divergence is computed as
//
//	D_KL(l || r) =  log Γ(α_l+β_l) - log Γ(α_l) - log Γ(β_l)
//	              - log Γ(α_r+β_r) + log Γ(α_r) + log Γ(β_r)
//	              + (α_l-α_r)(ψ(α_l)-ψ(α_l+β_l)) + (β_l-β_r)(ψ(β_l)-ψ(α_l+β_l))
//
// Where Γ is the gamma function and ψ is the digamma function.
//
// DistBeta panics if either distribution has a non-positive parameter.
func (KullbackLeibler) DistBeta(l, r Beta) float64 {
	// http://bariskurt.com/kullback-leibler-divergence-between-two-dirichlet-and-beta-distributions/
	if l.Alpha <= 0 || l.Beta <= 0 {
		panic("distuv: bad parameters for left distribution")
	}
	if r.Alpha <= 0 || r.Beta <= 0 {
		panic("distuv: bad parameters for right distribution")
	}
	lab := l.Alpha + l.Beta
	// lt = log of the left normalizing constant: lnΓ(α_l+β_l) - lnΓ(α_l) - lnΓ(β_l).
	l1, _ := math.Lgamma(lab)
	l2, _ := math.Lgamma(l.Alpha)
	l3, _ := math.Lgamma(l.Beta)
	lt := l1 - l2 - l3
	// rt = log of the right normalizing constant.
	r1, _ := math.Lgamma(r.Alpha + r.Beta)
	r2, _ := math.Lgamma(r.Alpha)
	r3, _ := math.Lgamma(r.Beta)
	rt := r1 - r2 - r3
	// ct = the digamma cross terms.
	d0 := mathext.Digamma(l.Alpha + l.Beta)
	ct := (l.Alpha-r.Alpha)*(mathext.Digamma(l.Alpha)-d0) + (l.Beta-r.Beta)*(mathext.Digamma(l.Beta)-d0)
	return lt - rt + ct
}
// DistNormal returns the Kullback-Leibler divergence between Normal distributions
// l and r.
//
// For two Normal distributions, the KL divergence is computed as
//
//	D_KL(l || r) = log(σ_r / σ_l) + (σ_l^2 + (μ_l-μ_r)^2)/(2 * σ_r^2) - 0.5
func (KullbackLeibler) DistNormal(l, r Normal) float64 {
	d := l.Mu - r.Mu
	v := (l.Sigma*l.Sigma + d*d) / (2 * r.Sigma * r.Sigma)
	return math.Log(r.Sigma) - math.Log(l.Sigma) + v - 0.5
}

162
vendor/gonum.org/v1/gonum/stat/distuv/studentst.go generated vendored Normal file
View File

@@ -0,0 +1,162 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/mathext"
)
const logPi = 1.1447298858494001741 // ln(π); http://oeis.org/A053510
// StudentsT implements the three-parameter Student's T distribution, a distribution
// over the real numbers.
//
// The Student's T distribution has density function
//
//	Γ((ν+1)/2) / (sqrt(νπ) Γ(ν/2) σ) (1 + 1/ν * ((x-μ)/σ)^2)^(-(ν+1)/2)
//
// The Student's T distribution approaches the normal distribution as ν → ∞.
//
// For more information, see https://en.wikipedia.org/wiki/Student%27s_t-distribution,
// specifically https://en.wikipedia.org/wiki/Student%27s_t-distribution#Non-standardized_Student.27s_t-distribution .
//
// The standard Student's T distribution is with Mu = 0, and Sigma = 1.
type StudentsT struct {
	// Mu is the location parameter of the distribution, and the mean of the
	// distribution
	Mu float64
	// Sigma is the scale parameter of the distribution. It is related to the
	// standard deviation by std = Sigma * sqrt(Nu/(Nu-2))
	Sigma float64
	// Nu is the shape parameter of the distribution, representing the number of
	// degrees of the distribution, and one less than the number of observations
	// from a Normal distribution.
	Nu float64
	// Src is the source of random numbers; nil uses the global source.
	Src rand.Source
}
// CDF computes the value of the cumulative distribution function at x,
// using the regularized incomplete beta function.
func (s StudentsT) CDF(x float64) float64 {
	// transform to standard normal
	y := (x - s.Mu) / s.Sigma
	if y == 0 {
		return 0.5
	}
	// For t > 0
	//  F(y) = 1 - 0.5 * I_t(y)(nu/2, 1/2)
	//  t(y) = nu/(y^2 + nu)
	// and 1 - F(y) for t < 0
	t := s.Nu / (y*y + s.Nu)
	if y > 0 {
		return 1 - 0.5*mathext.RegIncBeta(0.5*s.Nu, 0.5, t)
	}
	return 0.5 * mathext.RegIncBeta(s.Nu/2, 0.5, t)
}
// LogProb computes the natural logarithm of the value of the probability
// density function at x, evaluated in log space via Lgamma.
func (s StudentsT) LogProb(x float64) float64 {
	z := (x - s.Mu) / s.Sigma
	g1, _ := math.Lgamma((s.Nu + 1) / 2)
	g2, _ := math.Lgamma(s.Nu / 2)
	return g1 - g2 - 0.5*math.Log(s.Nu) - 0.5*logPi - math.Log(s.Sigma) - ((s.Nu+1)/2)*math.Log(1+z*z/s.Nu)
}
// Mean returns the mean of the probability distribution, which is Mu.
func (s StudentsT) Mean() float64 {
	return s.Mu
}
// Mode returns the mode of the distribution.
// The density peaks at the location parameter Mu.
func (s StudentsT) Mode() float64 {
	return s.Mu
}
// NumParameters returns the number of parameters in the distribution.
// Always returns 3 (Mu, Sigma and Nu).
func (StudentsT) NumParameters() int {
	return 3
}
// Prob computes the value of the probability density function at x.
// It is defined as exp(LogProb(x)).
func (s StudentsT) Prob(x float64) float64 {
	return math.Exp(s.LogProb(x))
}
// Quantile returns the inverse of the cumulative distribution function,
// computed via the inverse regularized incomplete beta function.
// Quantile panics if p is outside [0, 1].
func (s StudentsT) Quantile(p float64) float64 {
	if p < 0 || p > 1 {
		panic(badPercentile)
	}
	// F(x) = 1 - 0.5 * I_t(x)(nu/2, 1/2)
	// t(x) = nu/(t^2 + nu)
	if p == 0.5 {
		// The distribution is symmetric about Mu.
		return s.Mu
	}
	var y float64
	if p > 0.5 {
		// Know t > 0
		t := mathext.InvRegIncBeta(s.Nu/2, 0.5, 2*(1-p))
		y = math.Sqrt(s.Nu * (1 - t) / t)
	} else {
		t := mathext.InvRegIncBeta(s.Nu/2, 0.5, 2*p)
		y = -math.Sqrt(s.Nu * (1 - t) / t)
	}
	// Convert out of standard normal
	return y*s.Sigma + s.Mu
}
// Rand returns a random sample drawn from the distribution, constructed as a
// standard normal variate divided by the square root of a scaled chi-squared
// (gamma) variate, then shifted and scaled by Mu and Sigma.
func (s StudentsT) Rand() float64 {
	// http://www.math.uah.edu/stat/special/Student.html
	n := Normal{0, 1, s.Src}.Rand()
	c := Gamma{s.Nu / 2, 0.5, s.Src}.Rand()
	z := n / math.Sqrt(c/s.Nu)
	return z*s.Sigma + s.Mu
}
// StdDev returns the standard deviation of the probability distribution.
//
// The standard deviation is undefined for ν <= 1, and this returns math.NaN().
func (s StudentsT) StdDev() float64 {
	// Inherits the Nu <= 1 (NaN) and Nu <= 2 (+Inf) special cases from Variance.
	return math.Sqrt(s.Variance())
}
// Survival returns the survival function (complementary CDF) at x.
func (s StudentsT) Survival(x float64) float64 {
	// Standardize the input.
	y := (x - s.Mu) / s.Sigma
	if y == 0 {
		return 0.5
	}
	// With t = nu/(y^2 + nu), the survival for y > 0 is
	//	S(y) = 0.5 * I_t(nu/2, 1/2)
	// and by symmetry S(y) = 1 - S(-y) for y < 0.
	t := s.Nu / (y*y + s.Nu)
	tail := 0.5 * mathext.RegIncBeta(0.5*s.Nu, 0.5, t)
	if y > 0 {
		return tail
	}
	return 1 - tail
}
// Variance returns the variance of the probability distribution.
//
// The variance is undefined for ν <= 1, and this returns math.NaN().
func (s StudentsT) Variance() float64 {
	switch {
	case s.Nu <= 1:
		// Undefined.
		return math.NaN()
	case s.Nu <= 2:
		// Infinite for 1 < ν <= 2.
		return math.Inf(1)
	default:
		return s.Sigma * s.Sigma * s.Nu / (s.Nu - 2)
	}
}

279
vendor/gonum.org/v1/gonum/stat/distuv/triangle.go generated vendored Normal file
View File

@@ -0,0 +1,279 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
)
// Triangle represents a triangle distribution (https://en.wikipedia.org/wiki/Triangular_distribution).
type Triangle struct {
	a, b, c float64 // lower limit, upper limit, and mode; a < b, a <= c <= b
	src rand.Source // source of random numbers; nil means the global source
}
// NewTriangle constructs a new triangle distribution with lower limit a, upper limit b, and mode c.
// Constraints are a < b and a ≤ c ≤ b.
// This distribution is uncommon in nature, but may be useful for simulation.
//
// NewTriangle panics if the constraints are violated. If src is nil the
// global random source is used by Rand.
func NewTriangle(a, b, c float64, src rand.Source) Triangle {
	checkTriangleParameters(a, b, c)
	return Triangle{a: a, b: b, c: c, src: src}
}
// checkTriangleParameters panics unless a < b and a <= c <= b, checking the
// constraints in that order.
func checkTriangleParameters(a, b, c float64) {
	switch {
	case a >= b:
		panic("triangle: constraint of a < b violated")
	case a > c:
		panic("triangle: constraint of a <= c violated")
	case c > b:
		panic("triangle: constraint of c <= b violated")
	}
}
// CDF computes the value of the cumulative density function at x.
func (t Triangle) CDF(x float64) float64 {
	if x <= t.a {
		return 0
	}
	if x <= t.c {
		// Rising quadratic piece on (a, c].
		d := x - t.a
		return (d * d) / ((t.b - t.a) * (t.c - t.a))
	}
	if x < t.b {
		// Falling quadratic piece on (c, b).
		d := t.b - x
		return 1 - (d*d)/((t.b-t.a)*(t.b-t.c))
	}
	return 1
}
// Entropy returns the entropy of the distribution.
func (t Triangle) Entropy() float64 {
	// Closed form: 0.5 + ln((b-a)/2).
	return 0.5 + math.Log(t.b-t.a) - math.Ln2
}
// ExKurtosis returns the excess kurtosis of the distribution.
//
// The excess kurtosis of a triangular distribution is constant (-3/5),
// independent of the parameters.
func (Triangle) ExKurtosis() float64 {
	return -3.0 / 5.0
}
// Fit is not appropriate for Triangle, because the distribution is generally used when there is little data.
// LogProb computes the natural logarithm of the value of the probability density function at x.
//
// Outside the support [a, b] the density is 0, so the result is -Inf there.
func (t Triangle) LogProb(x float64) float64 {
	return math.Log(t.Prob(x))
}
// Mean returns the mean of the probability distribution.
func (t Triangle) Mean() float64 {
	// The mean of a triangular distribution is the average of its vertices.
	return (t.a + t.b + t.c) / 3
}
// Median returns the median of the probability distribution.
func (t Triangle) Median() float64 {
	// The median lies on the rising piece of the CDF when the mode is at or
	// beyond the midpoint of [a, b], and on the falling piece otherwise.
	if t.c < (t.a+t.b)/2 {
		return t.b - math.Sqrt((t.b-t.a)*(t.b-t.c)/2)
	}
	return t.a + math.Sqrt((t.b-t.a)*(t.c-t.a)/2)
}
// Mode returns the mode of the probability distribution.
func (t Triangle) Mode() float64 {
	// c is the mode by construction (peak of the triangle).
	return t.c
}
// NumParameters returns the number of parameters in the distribution.
func (Triangle) NumParameters() int {
	// a, b and c.
	return 3
}
// Prob computes the value of the probability density function at x.
func (t Triangle) Prob(x float64) float64 {
	switch {
	case x < t.a:
		return 0
	case x < t.c:
		// Rising linear piece on [a, c).
		return 2 * (x - t.a) / ((t.b - t.a) * (t.c - t.a))
	case x == t.c:
		// Peak of the triangle; also covers degenerate c == a or c == b.
		return 2 / (t.b - t.a)
	case x <= t.b:
		// Falling linear piece on (c, b].
		return 2 * (t.b - x) / ((t.b - t.a) * (t.b - t.c))
	default:
		return 0
	}
}
// Quantile returns the inverse of the cumulative probability distribution.
//
// Quantile panics if p is outside [0, 1].
func (t Triangle) Quantile(p float64) float64 {
	if p < 0 || p > 1 {
		panic(badPercentile)
	}
	// CDF(c) = (c-a)/(b-a) separates the two quadratic pieces.
	pivot := (t.c - t.a) / (t.b - t.a)
	if p < pivot {
		return t.a + math.Sqrt(p*(t.b-t.a)*(t.c-t.a))
	}
	return t.b - math.Sqrt((1-p)*(t.b-t.a)*(t.b-t.c))
}
// Rand returns a random sample drawn from the distribution.
func (t Triangle) Rand() float64 {
	// Inverse-transform sampling: push a uniform variate through Quantile.
	if t.src == nil {
		return t.Quantile(rand.Float64())
	}
	return t.Quantile(rand.New(t.src).Float64())
}
// Score returns the score function with respect to the parameters of the
// distribution at the input location x. The score function is the derivative
// of the log-likelihood at x with respect to the parameters
//
//  (∂/∂θ) log(p(x;θ))
//
// If deriv is non-nil, len(deriv) must equal the number of parameters otherwise
// Score will panic, and the derivative is stored in-place into deriv. If deriv
// is nil a new slice will be allocated and returned.
//
// The order is [∂LogProb / ∂a, ∂LogProb / ∂b, ∂LogProb / ∂c].
//
// For more information, see https://en.wikipedia.org/wiki/Score_%28statistics%29.
func (t Triangle) Score(deriv []float64, x float64) []float64 {
	if deriv == nil {
		deriv = make([]float64, t.NumParameters())
	}
	if len(deriv) != t.NumParameters() {
		panic(badLength)
	}
	if (x < t.a) || (x > t.b) {
		// Outside the support the density is zero; the score is undefined.
		deriv[0] = math.NaN()
		deriv[1] = math.NaN()
		deriv[2] = math.NaN()
	} else {
		invBA := 1 / (t.b - t.a)
		invCA := 1 / (t.c - t.a)
		invBC := 1 / (t.b - t.c)
		// Derivatives differ on the rising (x < c) and falling (x > c) pieces.
		switch {
		case x < t.c:
			deriv[0] = -1/(x-t.a) + invBA + invCA
			deriv[1] = -invBA
			deriv[2] = -invCA
		case x > t.c:
			deriv[0] = invBA
			deriv[1] = 1/(t.b-x) - invBA - invBC
			deriv[2] = invBC
		default:
			deriv[0] = invBA
			deriv[1] = -invBA
			deriv[2] = 0
		}
		// The density is not differentiable at the vertices.
		switch {
		case x == t.a:
			deriv[0] = math.NaN()
		case x == t.b:
			deriv[1] = math.NaN()
		case x == t.c:
			deriv[2] = math.NaN()
		}
		// Degenerate triangles (mode coincident with a limit) make the
		// corresponding derivatives undefined.
		switch {
		case t.a == t.c:
			deriv[0] = math.NaN()
			deriv[2] = math.NaN()
		case t.b == t.c:
			deriv[1] = math.NaN()
			deriv[2] = math.NaN()
		}
	}
	return deriv
}
// ScoreInput returns the score function with respect to the input of the
// distribution at the input location specified by x. The score function is the
// derivative of the log-likelihood
//
//  (d/dx) log(p(x)) .
//
// Special cases (c is the mode of the distribution):
//
//  ScoreInput(c) = NaN
//  ScoreInput(x) = NaN for x not in (a, b)
func (t Triangle) ScoreInput(x float64) float64 {
	if (x <= t.a) || (x >= t.b) || (x == t.c) {
		return math.NaN()
	}
	if x < t.c {
		// Rising piece: d/dx log(2(x-a)/...) = 1/(x-a).
		return 1 / (x - t.a)
	}
	// Falling piece: d/dx log(2(b-x)/...) = 1/(x-b), which is negative.
	return 1 / (x - t.b)
}
// Skewness returns the skewness of the distribution.
func (t Triangle) Skewness() float64 {
	// Standard closed form for the triangular distribution; the denominator
	// is 5 times the 3/2 power of 18×Variance.
	n := math.Sqrt2 * (t.a + t.b - 2*t.c) * (2*t.a - t.b - t.c) * (t.a - 2*t.b + t.c)
	d := 5 * math.Pow(t.a*t.a+t.b*t.b+t.c*t.c-t.a*t.b-t.a*t.c-t.b*t.c, 3.0/2.0)
	return n / d
}
// StdDev returns the standard deviation of the probability distribution.
func (t Triangle) StdDev() float64 {
	return math.Sqrt(t.Variance())
}
// Survival returns the survival function (complementary CDF) at x.
func (t Triangle) Survival(x float64) float64 {
	return 1 - t.CDF(x)
}
// parameters returns the parameters of the distribution, reusing p when it
// is non-nil. It panics when a non-nil p has the wrong length.
func (t Triangle) parameters(p []Parameter) []Parameter {
	n := t.NumParameters()
	switch {
	case p == nil:
		p = make([]Parameter, n)
	case len(p) != n:
		panic("triangle: improper parameter length")
	}
	p[0].Name, p[0].Value = "A", t.a
	p[1].Name, p[1].Value = "B", t.b
	p[2].Name, p[2].Value = "C", t.c
	return p
}
// setParameters modifies the parameters of the distribution. The parameter
// names must be "A", "B" and "C" in that order, and the values must satisfy
// the triangle constraints, otherwise setParameters panics.
func (t *Triangle) setParameters(p []Parameter) {
	if len(p) != t.NumParameters() {
		panic("triangle: incorrect number of parameters to set")
	}
	for i, want := range [3]string{"A", "B", "C"} {
		if p[i].Name != want {
			panic("triangle: " + panicNameMismatch)
		}
	}
	a, b, c := p[0].Value, p[1].Value, p[2].Value
	checkTriangleParameters(a, b, c)
	t.a, t.b, t.c = a, b, c
}
// Variance returns the variance of the probability distribution.
func (t Triangle) Variance() float64 {
	// Standard closed form (a² + b² + c² - ab - ac - bc)/18.
	return (t.a*t.a + t.b*t.b + t.c*t.c - t.a*t.b - t.a*t.c - t.b*t.c) / 18
}

211
vendor/gonum.org/v1/gonum/stat/distuv/uniform.go generated vendored Normal file
View File

@@ -0,0 +1,211 @@
// Copyright ©2014 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
)
// UnitUniform is an instantiation of the uniform distribution with Min = 0
// and Max = 1. Its Src is left nil, so Rand draws from the global source.
var UnitUniform = Uniform{Min: 0, Max: 1}
// Uniform represents a continuous uniform distribution (https://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29).
type Uniform struct {
	Min float64 // lower bound of the support
	Max float64 // upper bound of the support
	Src rand.Source // source of random numbers; nil means the global source
}
// CDF computes the value of the cumulative density function at x.
func (u Uniform) CDF(x float64) float64 {
	switch {
	case x < u.Min:
		return 0
	case x > u.Max:
		return 1
	default:
		// Linear interpolation across the support.
		return (x - u.Min) / (u.Max - u.Min)
	}
}
// Uniform doesn't have any of the DLogProbD? because the derivative is 0 everywhere
// except where it's undefined
// Entropy returns the entropy of the distribution.
func (u Uniform) Entropy() float64 {
	// Differential entropy of a uniform: log of the support width.
	return math.Log(u.Max - u.Min)
}
// ExKurtosis returns the excess kurtosis of the distribution.
//
// The excess kurtosis of a uniform distribution is constant (-6/5),
// independent of the parameters.
func (Uniform) ExKurtosis() float64 {
	return -6.0 / 5.0
}
// Uniform doesn't have Fit because it's a bad idea to fit a uniform from data.
// LogProb computes the natural logarithm of the value of the probability density function at x.
func (u Uniform) LogProb(x float64) float64 {
	// Outside the support the density is 0, so its log is -Inf.
	if x < u.Min || x > u.Max {
		return math.Inf(-1)
	}
	return -math.Log(u.Max - u.Min)
}
// parameters returns the parameters of the distribution, reusing p when it
// is non-nil. It panics when a non-nil p has the wrong length.
func (u Uniform) parameters(p []Parameter) []Parameter {
	n := u.NumParameters()
	switch {
	case p == nil:
		p = make([]Parameter, n)
	case len(p) != n:
		panic("uniform: improper parameter length")
	}
	p[0].Name, p[0].Value = "Min", u.Min
	p[1].Name, p[1].Value = "Max", u.Max
	return p
}
// Mean returns the mean of the probability distribution.
func (u Uniform) Mean() float64 {
	// The midpoint of the support.
	return (u.Max + u.Min) / 2
}
// Median returns the median of the probability distribution.
//
// By symmetry the median equals the mean.
func (u Uniform) Median() float64 {
	return (u.Max + u.Min) / 2
}
// Uniform doesn't have a mode because it's any value in the distribution
// NumParameters returns the number of parameters in the distribution.
func (Uniform) NumParameters() int {
	// Min and Max.
	return 2
}
// Prob computes the value of the probability density function at x.
func (u Uniform) Prob(x float64) float64 {
	switch {
	case x < u.Min:
		return 0
	case x > u.Max:
		return 0
	}
	// Constant density over the support.
	return 1 / (u.Max - u.Min)
}
// Quantile returns the inverse of the cumulative probability distribution.
//
// Quantile panics if p is outside [0, 1].
func (u Uniform) Quantile(p float64) float64 {
	if p < 0 || p > 1 {
		panic(badPercentile)
	}
	// Linear map of [0, 1] onto [Min, Max].
	return p*(u.Max-u.Min) + u.Min
}
// Rand returns a random sample drawn from the distribution.
func (u Uniform) Rand() float64 {
	// Scale a uniform variate on [0, 1) onto the support.
	if u.Src == nil {
		return rand.Float64()*(u.Max-u.Min) + u.Min
	}
	return rand.New(u.Src).Float64()*(u.Max-u.Min) + u.Min
}
// Score returns the score function with respect to the parameters of the
// distribution at the input location x. The score function is the derivative
// of the log-likelihood at x with respect to the parameters
//
//  (∂/∂θ) log(p(x;θ))
//
// If deriv is non-nil, len(deriv) must equal the number of parameters otherwise
// Score will panic, and the derivative is stored in-place into deriv. If deriv
// is nil a new slice will be allocated and returned.
//
// The order is [∂LogProb / ∂Min, ∂LogProb / ∂Max].
//
// For more information, see https://en.wikipedia.org/wiki/Score_%28statistics%29.
func (u Uniform) Score(deriv []float64, x float64) []float64 {
	if deriv == nil {
		deriv = make([]float64, u.NumParameters())
	}
	if len(deriv) != u.NumParameters() {
		panic(badLength)
	}
	if (x < u.Min) || (x > u.Max) {
		// Outside the support the density is zero; the score is undefined.
		deriv[0] = math.NaN()
		deriv[1] = math.NaN()
	} else {
		// log(p) = -log(Max-Min), so the derivatives are ±1/(Max-Min).
		deriv[0] = 1 / (u.Max - u.Min)
		deriv[1] = -deriv[0]
		// The density is not differentiable at the boundaries.
		if x == u.Min {
			deriv[0] = math.NaN()
		}
		if x == u.Max {
			deriv[1] = math.NaN()
		}
	}
	return deriv
}
// ScoreInput returns the score function with respect to the input of the
// distribution at the input location specified by x. The score function is the
// derivative of the log-likelihood
//
//  (d/dx) log(p(x)) .
//
// The result is NaN outside the open interval (Min, Max) and at its
// endpoints, and 0 inside it because the density is constant there.
func (u Uniform) ScoreInput(x float64) float64 {
	if (x <= u.Min) || (x >= u.Max) {
		return math.NaN()
	}
	return 0
}
// Skewness returns the skewness of the distribution.
//
// A uniform distribution is symmetric, so the skewness is always 0.
func (Uniform) Skewness() float64 {
	return 0
}
// StdDev returns the standard deviation of the probability distribution.
func (u Uniform) StdDev() float64 {
	return math.Sqrt(u.Variance())
}
// Survival returns the survival function (complementary CDF) at x.
func (u Uniform) Survival(x float64) float64 {
	switch {
	case x < u.Min:
		return 1
	case x > u.Max:
		return 0
	default:
		// Remaining fraction of the support above x.
		return (u.Max - x) / (u.Max - u.Min)
	}
}
// setParameters modifies the parameters of the distribution. The parameter
// names must be "Min" and "Max" in that order, otherwise setParameters panics.
func (u *Uniform) setParameters(p []Parameter) {
	if len(p) != u.NumParameters() {
		panic("uniform: incorrect number of parameters to set")
	}
	for i, want := range [2]string{"Min", "Max"} {
		if p[i].Name != want {
			panic("uniform: " + panicNameMismatch)
		}
	}
	u.Min = p[0].Value
	u.Max = p[1].Value
}
// Variance returns the variance of the probability distribution.
func (u Uniform) Variance() float64 {
	// (Max-Min)²/12, the standard closed form.
	return 1.0 / 12.0 * (u.Max - u.Min) * (u.Max - u.Min)
}

232
vendor/gonum.org/v1/gonum/stat/distuv/weibull.go generated vendored Normal file
View File

@@ -0,0 +1,232 @@
// Copyright ©2014 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package distuv
import (
"math"
"golang.org/x/exp/rand"
)
// Weibull distribution. Valid range for x is [0,+∞).
type Weibull struct {
	// Shape parameter of the distribution. A value of 1 represents
	// the exponential distribution. A value of 2 represents the
	// Rayleigh distribution. Valid range is (0,+∞).
	K float64
	// Scale parameter of the distribution. Valid range is (0,+∞).
	Lambda float64
	// Source of random numbers. If nil, the global source is used.
	Src rand.Source
}
// CDF computes the value of the cumulative density function at x.
func (w Weibull) CDF(x float64) float64 {
	if x < 0 {
		return 0
	}
	// 1 - exp(-(x/λ)^K), using Expm1 for accuracy near 0.
	return -math.Expm1(-math.Pow(x/w.Lambda, w.K))
}
// Entropy returns the entropy of the distribution.
func (w Weibull) Entropy() float64 {
	// γ(1 - 1/K) + ln(λ/K) + 1, where γ (eulerGamma) is the
	// Euler–Mascheroni constant defined elsewhere in this package.
	return eulerGamma*(1-1/w.K) + math.Log(w.Lambda/w.K) + 1
}
// ExKurtosis returns the excess kurtosis of the distribution.
//
// Computed from the standard closed form in terms of Γ(1+i/K) moments;
// gammaIPow(i, p) is Γ(1+i/K)^p.
func (w Weibull) ExKurtosis() float64 {
	return (-6*w.gammaIPow(1, 4) + 12*w.gammaIPow(1, 2)*math.Gamma(1+2/w.K) - 3*w.gammaIPow(2, 2) - 4*math.Gamma(1+1/w.K)*math.Gamma(1+3/w.K) + math.Gamma(1+4/w.K)) / math.Pow(math.Gamma(1+2/w.K)-w.gammaIPow(1, 2), 2)
}
// gammaIPow is a shortcut for computing Γ(1+i/K) raised to a power.
func (w Weibull) gammaIPow(i, pow float64) float64 {
	return math.Pow(math.Gamma(1+i/w.K), pow)
}
// LogProb computes the natural logarithm of the value of the probability
// density function at x. -Inf is returned if x is less than zero.
//
// Special cases occur when x == 0, and the result depends on the shape
// parameter as follows:
//
//  If 0 < K < 1, LogProb returns +Inf.
//  If K == 1, LogProb returns 0.
//  If K > 1, LogProb returns -Inf.
func (w Weibull) LogProb(x float64) float64 {
	if x < 0 {
		return math.Inf(-1)
	}
	// The general formula below yields (K-1)*log(0) = 0*(-Inf) = NaN when
	// x == 0 and K == 1, hence the explicit branch.
	// NOTE(review): this branch returns 0 regardless of Lambda, while the
	// general formula would give -log(Lambda) in the limit — confirm that
	// this documented behavior is intended.
	if x == 0 && w.K == 1 {
		return 0
	}
	return math.Log(w.K) - math.Log(w.Lambda) + (w.K-1)*(math.Log(x)-math.Log(w.Lambda)) - math.Pow(x/w.Lambda, w.K)
}
// LogSurvival returns the log of the survival function (complementary CDF) at x.
func (w Weibull) LogSurvival(x float64) float64 {
	// The survival function is 1 for x < 0, so its log is 0.
	if x < 0 {
		return 0
	}
	// log(exp(-(x/λ)^K)) = -(x/λ)^K.
	return -math.Pow(x/w.Lambda, w.K)
}
// Mean returns the mean of the probability distribution.
func (w Weibull) Mean() float64 {
	// λ·Γ(1 + 1/K).
	return w.Lambda * math.Gamma(1+1/w.K)
}
// Median returns the median of the Weibull distribution.
func (w Weibull) Median() float64 {
	// λ·(ln 2)^(1/K); ln2 is a package-level constant.
	return w.Lambda * math.Pow(ln2, 1/w.K)
}
// Mode returns the mode of the Weibull distribution.
//
// The mode is λ·((K-1)/K)^(1/K) for K > 1, and 0 otherwise.
//
// NOTE(review): the previous comment claimed NaN for K < 1, but the code
// has always returned 0 in that case — the documentation now reflects the
// implemented behavior.
func (w Weibull) Mode() float64 {
	if w.K > 1 {
		return w.Lambda * math.Pow((w.K-1)/w.K, 1/w.K)
	}
	return 0
}
// NumParameters returns the number of parameters in the distribution.
func (Weibull) NumParameters() int {
	// K and Lambda.
	return 2
}
// Prob computes the value of the probability density function at x.
func (w Weibull) Prob(x float64) float64 {
	if x < 0 {
		return 0
	}
	// Computed via LogProb; see its documentation for x == 0 special cases.
	return math.Exp(w.LogProb(x))
}
// Quantile returns the inverse of the cumulative probability distribution.
//
// Quantile panics if p is outside [0, 1].
func (w Weibull) Quantile(p float64) float64 {
	if p < 0 || p > 1 {
		panic(badPercentile)
	}
	// Inverse of CDF(x) = 1 - exp(-(x/λ)^K).
	return w.Lambda * math.Pow(-math.Log(1-p), 1/w.K)
}
// Rand returns a random sample drawn from the distribution.
func (w Weibull) Rand() float64 {
	// Inverse-transform sampling: push a uniform variate through Quantile.
	if w.Src == nil {
		return w.Quantile(rand.Float64())
	}
	return w.Quantile(rand.New(w.Src).Float64())
}
// Score returns the score function with respect to the parameters of the
// distribution at the input location x. The score function is the derivative
// of the log-likelihood at x with respect to the parameters
//
//  (∂/∂θ) log(p(x;θ))
//
// If deriv is non-nil, len(deriv) must equal the number of parameters otherwise
// Score will panic, and the derivative is stored in-place into deriv. If deriv
// is nil a new slice will be allocated and returned.
//
// The order is [∂LogProb / ∂K, ∂LogProb / ∂λ].
//
// For more information, see https://en.wikipedia.org/wiki/Score_%28statistics%29.
//
// Special cases:
//
//  Score(x) = [NaN, NaN] for x <= 0
func (w Weibull) Score(deriv []float64, x float64) []float64 {
	if deriv == nil {
		deriv = make([]float64, w.NumParameters())
	}
	if len(deriv) != w.NumParameters() {
		panic(badLength)
	}
	if x > 0 {
		// Analytic derivatives of LogProb with respect to K and Lambda.
		deriv[0] = 1/w.K + math.Log(x) - math.Log(w.Lambda) - (math.Log(x)-math.Log(w.Lambda))*math.Pow(x/w.Lambda, w.K)
		deriv[1] = (w.K * (math.Pow(x/w.Lambda, w.K) - 1)) / w.Lambda
		return deriv
	}
	deriv[0] = math.NaN()
	deriv[1] = math.NaN()
	return deriv
}
// ScoreInput returns the score function with respect to the input of the
// distribution at the input location specified by x. The score function is the
// derivative of the log-likelihood
//
//  (d/dx) log(p(x)) .
//
// Special cases:
//
//  ScoreInput(x) = NaN for x <= 0
func (w Weibull) ScoreInput(x float64) float64 {
	if x > 0 {
		// d/dx of LogProb: ((K-1) - K(x/λ)^K)/x.
		return (-w.K*math.Pow(x/w.Lambda, w.K) + w.K - 1) / x
	}
	return math.NaN()
}
// Skewness returns the skewness of the distribution.
func (w Weibull) Skewness() float64 {
	// The three terms of the skewness formula are evaluated in log space
	// (via Lgamma and Log) to avoid overflow, then exponentiated.
	stdDev := w.StdDev()
	firstGamma, firstGammaSign := math.Lgamma(1 + 3/w.K)
	logFirst := firstGamma + 3*(math.Log(w.Lambda)-math.Log(stdDev))
	logSecond := math.Log(3) + math.Log(w.Mean()) + 2*math.Log(stdDev) - 3*math.Log(stdDev)
	logThird := 3 * (math.Log(w.Mean()) - math.Log(stdDev))
	return float64(firstGammaSign)*math.Exp(logFirst) - math.Exp(logSecond) - math.Exp(logThird)
}
// StdDev returns the standard deviation of the probability distribution.
func (w Weibull) StdDev() float64 {
	return math.Sqrt(w.Variance())
}
// Survival returns the survival function (complementary CDF) at x.
func (w Weibull) Survival(x float64) float64 {
	// exp(-(x/λ)^K); LogSurvival handles the x < 0 case.
	return math.Exp(w.LogSurvival(x))
}
// setParameters modifies the parameters of the distribution. The parameter
// names must be "K" and "λ" in that order, otherwise setParameters panics.
func (w *Weibull) setParameters(p []Parameter) {
	if len(p) != w.NumParameters() {
		panic("weibull: incorrect number of parameters to set")
	}
	for i, want := range [2]string{"K", "λ"} {
		if p[i].Name != want {
			panic("weibull: " + panicNameMismatch)
		}
	}
	w.K = p[0].Value
	w.Lambda = p[1].Value
}
// Variance returns the variance of the probability distribution.
func (w Weibull) Variance() float64 {
	// λ²·(Γ(1+2/K) - Γ(1+1/K)²).
	return math.Pow(w.Lambda, 2) * (math.Gamma(1+2/w.K) - w.gammaIPow(1, 2))
}
// parameters returns the parameters of the distribution, reusing p when it
// is non-nil. It panics when a non-nil p has the wrong length.
func (w Weibull) parameters(p []Parameter) []Parameter {
	n := w.NumParameters()
	switch {
	case p == nil:
		p = make([]Parameter, n)
	case len(p) != n:
		panic("weibull: improper parameter length")
	}
	p[0].Name, p[0].Value = "K", w.K
	p[1].Name, p[1].Value = "λ", w.Lambda
	return p
}

6
vendor/gonum.org/v1/gonum/stat/doc.go generated vendored Normal file
View File

@@ -0,0 +1,6 @@
// Copyright ©2017 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package stat provides generalized statistical functions.
package stat // import "gonum.org/v1/gonum/stat"

324
vendor/gonum.org/v1/gonum/stat/pca_cca.go generated vendored Normal file
View File

@@ -0,0 +1,324 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package stat
import (
"errors"
"math"
"gonum.org/v1/gonum/floats"
"gonum.org/v1/gonum/mat"
)
// PC is a type for computing and extracting the principal components of a
// matrix. The results of the principal components analysis are only valid
// if the call to PrincipalComponents was successful.
type PC struct {
	n, d int // dimensions of the last analyzed matrix (observations × variables)
	weights []float64 // copy of the observation weights; nil for unweighted
	svd *mat.SVD // factorization of the centered data, reused across calls
	ok bool // whether the last factorization succeeded
}
// PrincipalComponents performs a weighted principal components analysis on the
// matrix of the input data which is represented as an n×d matrix a where each
// row is an observation and each column is a variable.
//
// PrincipalComponents centers the variables but does not scale the variance.
//
// The weights slice is used to weight the observations. If weights is nil, each
// weight is considered to have a value of one, otherwise the length of weights
// must match the number of observations or PrincipalComponents will panic.
//
// PrincipalComponents returns whether the analysis was successful.
func (c *PC) PrincipalComponents(a mat.Matrix, weights []float64) (ok bool) {
	c.n, c.d = a.Dims()
	if weights != nil && len(weights) != c.n {
		panic("stat: len(weights) != observations")
	}
	c.svd, c.ok = svdFactorizeCentered(c.svd, a, weights)
	if c.ok {
		if weights == nil {
			// Reset weights retained from a previous weighted analysis.
			// append(c.weights[:0]) would leave a non-nil zero-length
			// slice behind, which VarsTo's nil check would mistake for a
			// weighted analysis and normalize by 1/(sum(weights)-1) = -1.
			c.weights = nil
		} else {
			c.weights = append(c.weights[:0], weights...)
		}
	}
	return c.ok
}
// VectorsTo returns the component direction vectors of a principal components
// analysis. The vectors are returned in the columns of a d×min(n, d) matrix.
//
// If dst is empty, VectorsTo will resize dst to be d×min(n, d). When dst is
// non-empty, VectorsTo will panic if dst is not d×min(n, d). VectorsTo will also
// panic if the receiver does not contain a successful PC.
func (c *PC) VectorsTo(dst *mat.Dense) {
	if !c.ok {
		panic("stat: use of unsuccessful principal components analysis")
	}
	if dst.IsEmpty() {
		dst.ReuseAs(c.d, min(c.n, c.d))
	} else {
		if d, n := dst.Dims(); d != c.d || n != min(c.n, c.d) {
			panic(mat.ErrShape)
		}
	}
	// The right singular vectors of the centered data are the principal
	// directions.
	c.svd.VTo(dst)
}
// VarsTo returns the column variances of the principal component scores,
// b * vecs, where b is a matrix with centered columns. Variances are returned
// in descending order.
// If dst is not nil it is used to store the variances and returned.
// Vars will panic if the receiver has not successfully performed a principal
// components analysis or dst is not nil and the length of dst is not min(n, d).
func (c *PC) VarsTo(dst []float64) []float64 {
	if !c.ok {
		panic("stat: use of unsuccessful principal components analysis")
	}
	if dst != nil && len(dst) != min(c.n, c.d) {
		panic("stat: length of slice does not match analysis")
	}
	dst = c.svd.Values(dst)
	// Normalization factor: 1/(n-1) for an unweighted analysis, and
	// 1/(sum(weights)-1) otherwise.
	var f float64
	if c.weights == nil {
		f = 1 / float64(c.n-1)
	} else {
		f = 1 / (floats.Sum(c.weights) - 1)
	}
	// Squared singular values of the centered data, scaled by f, are the
	// score variances.
	for i, v := range dst {
		dst[i] = f * v * v
	}
	return dst
}
// min returns the smaller of two ints.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// CC is a type for computing the canonical correlations of a pair of matrices.
// The results of the canonical correlation analysis are only valid
// if the call to CanonicalCorrelations was successful.
type CC struct {
	// n is the number of observations used to
	// construct the canonical correlations.
	n int
	// xd and yd are used for size checks.
	xd, yd int
	// x and y hold the SVDs of the centered inputs; c holds the SVD of the
	// canonical correlation matrix. All are reused across calls.
	x, y, c *mat.SVD
	// ok records whether the most recent factorization succeeded.
	ok bool
}
// CanonicalCorrelations performs a canonical correlation analysis of the
// input data x and y, columns of which should be interpretable as two sets
// of measurements on the same observations (rows). These observations are
// optionally weighted by weights. The result of the analysis is stored in
// the receiver if the analysis is successful.
//
// Canonical correlation analysis finds associations between two sets of
// variables on the same observations by finding linear combinations of the two
// sphered datasets that maximize the correlation between them.
//
// Some notation: let Xc and Yc denote the centered input data matrices x
// and y (column means subtracted from each column), let Sx and Sy denote the
// sample covariance matrices within x and y respectively, and let Sxy denote
// the covariance matrix between x and y. The sphered data can then be expressed
// as Xc * Sx^{-1/2} and Yc * Sy^{-1/2} respectively, and the correlation matrix
// between the sphered data is called the canonical correlation matrix,
// Sx^{-1/2} * Sxy * Sy^{-1/2}. In cases where S^{-1/2} is ambiguous for some
// covariance matrix S, S^{-1/2} is taken to be E * D^{-1/2} * Eᵀ where S can
// be eigendecomposed as S = E * D * Eᵀ.
//
// The canonical correlations are the correlations between the corresponding
// pairs of canonical variables and can be obtained with c.Corrs(). Canonical
// variables can be obtained by projecting the sphered data into the left and
// right eigenvectors of the canonical correlation matrix, and these
// eigenvectors can be obtained with c.Left(m, true) and c.Right(m, true)
// respectively. The canonical variables can also be obtained directly from the
// centered raw data by using the back-transformed eigenvectors which can be
// obtained with c.Left(m, false) and c.Right(m, false) respectively.
//
// The first pair of left and right eigenvectors of the canonical correlation
// matrix can be interpreted as directions into which the respective sphered
// data can be projected such that the correlation between the two projections
// is maximized. The second pair and onwards solve the same optimization but
// under the constraint that they are uncorrelated (orthogonal in sphered space)
// to previous projections.
//
// CanonicalCorrelations will panic if the inputs x and y do not have the same
// number of rows.
//
// The slice weights is used to weight the observations. If weights is nil, each
// weight is considered to have a value of one, otherwise the length of weights
// must match the number of observations (rows of both x and y) or
// CanonicalCorrelations will panic.
//
// More details can be found at
// https://en.wikipedia.org/wiki/Canonical_correlation
// or in Chapter 3 of
// Koch, Inge. Analysis of multivariate and high-dimensional data.
// Vol. 32. Cambridge University Press, 2013. ISBN: 9780521887939
func (c *CC) CanonicalCorrelations(x, y mat.Matrix, weights []float64) error {
	var yn int
	c.n, c.xd = x.Dims()
	yn, c.yd = y.Dims()
	if c.n != yn {
		panic("stat: unequal number of observations")
	}
	if weights != nil && len(weights) != c.n {
		panic("stat: len(weights) != observations")
	}
	// Center and factorize x and y.
	c.x, c.ok = svdFactorizeCentered(c.x, x, weights)
	if !c.ok {
		return errors.New("stat: failed to factorize x")
	}
	c.y, c.ok = svdFactorizeCentered(c.y, y, weights)
	if !c.ok {
		return errors.New("stat: failed to factorize y")
	}
	// Extract the singular vectors of both factorizations; the canonical
	// correlation matrix is built from them below.
	var xu, xv, yu, yv mat.Dense
	c.x.UTo(&xu)
	c.x.VTo(&xv)
	c.y.UTo(&yu)
	c.y.VTo(&yv)
	// Calculate and factorise the canonical correlation matrix.
	var ccor mat.Dense
	ccor.Product(&xv, xu.T(), &yu, yv.T())
	if c.c == nil {
		c.c = &mat.SVD{}
	}
	c.ok = c.c.Factorize(&ccor, mat.SVDThin)
	if !c.ok {
		return errors.New("stat: failed to factorize ccor")
	}
	return nil
}
// CorrsTo returns the canonical correlations, using dst if it is not nil.
// If dst is not nil and len(dst) does not match the number of columns in
// the y input matrix, Corrs will panic.
func (c *CC) CorrsTo(dst []float64) []float64 {
	if !c.ok {
		panic("stat: canonical correlations missing or invalid")
	}
	if dst != nil && len(dst) != c.yd {
		panic("stat: length of destination does not match input dimension")
	}
	// The singular values of the canonical correlation matrix are the
	// canonical correlations.
	return c.c.Values(dst)
}
// LeftTo returns the left eigenvectors of the canonical correlation matrix if
// spheredSpace is true. If spheredSpace is false it returns these eigenvectors
// back-transformed to the original data space.
//
// If dst is empty, LeftTo will resize dst to be xd×yd. When dst is
// non-empty, LeftTo will panic if dst is not xd×yd. LeftTo will also
// panic if the receiver does not contain a successful CC.
func (c *CC) LeftTo(dst *mat.Dense, spheredSpace bool) {
	if !c.ok || c.n < 2 {
		panic("stat: canonical correlations missing or invalid")
	}
	if dst.IsEmpty() {
		dst.ReuseAs(c.xd, c.yd)
	} else {
		if d, n := dst.Dims(); d != c.xd || n != c.yd {
			panic(mat.ErrShape)
		}
	}
	c.c.UTo(dst)
	if spheredSpace {
		return
	}
	// Back-transform to the original data space: scale the right singular
	// vectors of x by the reciprocal singular values, project, and restore
	// the sqrt(n-1) normalization.
	xs := c.x.Values(nil)
	xv := &mat.Dense{}
	c.x.VTo(xv)
	scaleColsReciSqrt(xv, xs)
	dst.Product(xv, xv.T(), dst)
	dst.Scale(math.Sqrt(float64(c.n-1)), dst)
}
// RightTo returns the right eigenvectors of the canonical correlation matrix if
// spheredSpace is true. If spheredSpace is false it returns these eigenvectors
// back-transformed to the original data space.
//
// If dst is empty, RightTo will resize dst to be yd×yd. When dst is
// non-empty, RightTo will panic if dst is not yd×yd. RightTo will also
// panic if the receiver does not contain a successful CC.
func (c *CC) RightTo(dst *mat.Dense, spheredSpace bool) {
	if !c.ok || c.n < 2 {
		panic("stat: canonical correlations missing or invalid")
	}
	if dst.IsEmpty() {
		dst.ReuseAs(c.yd, c.yd)
	} else {
		if d, n := dst.Dims(); d != c.yd || n != c.yd {
			panic(mat.ErrShape)
		}
	}
	c.c.VTo(dst)
	if spheredSpace {
		return
	}
	// Back-transform to the original data space, mirroring LeftTo but using
	// the factorization of y.
	ys := c.y.Values(nil)
	yv := &mat.Dense{}
	c.y.VTo(yv)
	scaleColsReciSqrt(yv, ys)
	dst.Product(yv, yv.T(), dst)
	dst.Scale(math.Sqrt(float64(c.n-1)), dst)
}
// svdFactorizeCentered returns the thin SVD of m after centering each column
// by its (weighted) mean and scaling each row by the square root of its
// weight. A nil weights slice means no row scaling, since ranging over a nil
// slice performs no iterations. If work is non-nil it is reused to avoid
// allocating a new SVD.
func svdFactorizeCentered(work *mat.SVD, m mat.Matrix, weights []float64) (svd *mat.SVD, ok bool) {
	n, d := m.Dims()
	centered := mat.NewDense(n, d, nil)
	col := make([]float64, n)
	// Subtract each column's weighted mean.
	for j := 0; j < d; j++ {
		mat.Col(col, j, m)
		floats.AddConst(-Mean(col, weights), col)
		centered.SetCol(j, col)
	}
	// Apply observation weights; no iterations when weights is nil.
	for i, w := range weights {
		floats.Scale(math.Sqrt(w), centered.RawRowView(i))
	}
	if work == nil {
		work = &mat.SVD{}
	}
	ok = work.Factorize(centered, mat.SVDThin)
	return work, ok
}
// scaleColsReciSqrt scales the columns of cols
// by the reciprocal square-root of vals. It panics when cols is nil or when
// len(vals) does not match the number of columns.
func scaleColsReciSqrt(cols *mat.Dense, vals []float64) {
	if cols == nil {
		panic("stat: input nil")
	}
	rows, nCols := cols.Dims()
	if len(vals) != nCols {
		panic("stat: input length mismatch")
	}
	scratch := make([]float64, rows)
	for j, v := range vals {
		mat.Col(scratch, j, cols)
		floats.Scale(math.Sqrt(1/v), scratch)
		cols.SetCol(j, scratch)
	}
}

201
vendor/gonum.org/v1/gonum/stat/roc.go generated vendored Normal file
View File

@@ -0,0 +1,201 @@
// Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package stat
import (
"math"
"sort"
)
// ROC returns paired false positive rate (FPR) and true positive rate
// (TPR) values corresponding to cutoff points on the receiver operator
// characteristic (ROC) curve obtained when y is treated as a binary
// classifier for classes with weights. The cutoff thresholds used to
// calculate the ROC are returned in thresh such that tpr[i] and fpr[i]
// are the true and false positive rates for y >= thresh[i].
//
// The input y and cutoffs must be sorted, and values in y must correspond
// to values in classes and weights. SortWeightedLabeled can be used to
// sort y together with classes and weights.
//
// For a given cutoff value, observations with y greater than or equal to
// the cutoff are classified as true, and the remaining observations as
// false. These assigned labels are compared with the true values in the
// classes slice to obtain the FPR and TPR.
//
// If weights is nil, all weights are treated as 1. If weights is not nil
// it must have the same length as y and classes, otherwise ROC will panic.
//
// If cutoffs is nil or empty, all possible cutoffs are calculated,
// resulting in fpr and tpr having length one greater than the number of
// unique values in y. Otherwise fpr and tpr will be returned with the
// same length as cutoffs. floats.Span can be used to generate equally
// spaced cutoffs.
//
// More details about ROC curves are available at
// https://en.wikipedia.org/wiki/Receiver_operating_characteristic
func ROC(cutoffs, y []float64, classes []bool, weights []float64) (tpr, fpr, thresh []float64) {
	switch {
	case len(y) != len(classes):
		panic("stat: slice length mismatch")
	case weights != nil && len(y) != len(weights):
		panic("stat: slice length mismatch")
	case !sort.Float64sAreSorted(y):
		panic("stat: input must be sorted ascending")
	case !sort.Float64sAreSorted(cutoffs):
		panic("stat: cutoff values must be sorted ascending")
	}
	if len(y) == 0 {
		return nil, nil, nil
	}

	if len(cutoffs) == 0 {
		// Build one cutoff per unique value in y plus a final +Inf
		// sentinel, reusing the caller's backing array when it is
		// large enough.
		if cutoffs == nil || cap(cutoffs) < len(y)+1 {
			cutoffs = make([]float64, len(y)+1)
		} else {
			cutoffs = cutoffs[:len(y)+1]
		}
		last := 0
		cutoffs[0] = y[0]
		for i := 1; i < len(y); i++ {
			if y[i] != y[i-1] {
				last++
				cutoffs[last] = y[i]
			}
		}
		cutoffs[last+1] = math.Inf(1)
		cutoffs = cutoffs[:last+2]
	} else {
		// Work on a copy so the caller's cutoffs are not reordered.
		cutoffs = append([]float64(nil), cutoffs...)
	}

	tpr = make([]float64, len(cutoffs))
	fpr = make([]float64, len(cutoffs))
	var (
		bin        int
		nPos, nNeg float64
	)
	for i, isPos := range classes {
		// Advance to the first cutoff above y[i], carrying the
		// accumulated counts forward across any empty bins.
		for bin < len(cutoffs)-1 && y[i] >= cutoffs[bin] {
			bin++
			tpr[bin] = tpr[bin-1]
			fpr[bin] = fpr[bin-1]
		}
		w := 1.0
		if weights != nil {
			w = weights[i]
		}
		var wPos, wNeg float64
		if isPos {
			wPos = w
		} else {
			wNeg = w
		}
		nPos += wPos
		nNeg += wNeg
		// Accumulate false negatives in tpr and true negatives in
		// fpr; both are converted to rates below.
		if y[i] < cutoffs[bin] {
			tpr[bin] += wPos
			fpr[bin] += wNeg
		}
	}

	// Convert the negative counts into rates. Bins past the largest
	// value in y were never visited and keep their zero values.
	invPos := 1 / nPos
	invNeg := 1 / nNeg
	for i := 0; i <= bin; i++ {
		// The explicit float64 conversions prevent fused
		// multiply-add from changing the results.
		tpr[i] = 1 - float64(tpr[i]*invPos)
		fpr[i] = 1 - float64(fpr[i]*invNeg)
	}

	// Present the results in order of decreasing threshold.
	for i, j := 0, len(cutoffs)-1; i < j; i, j = i+1, j-1 {
		tpr[i], tpr[j] = tpr[j], tpr[i]
		fpr[i], fpr[j] = fpr[j], fpr[i]
		cutoffs[i], cutoffs[j] = cutoffs[j], cutoffs[i]
	}
	return tpr, fpr, cutoffs
}
// TOC returns the Total Operating Characteristic for the classes provided
// and the minimum and maximum bounds for the TOC.
//
// The input y values that correspond to classes and weights must be sorted
// in ascending order. classes[i] is the class of value y[i] and weights[i]
// is the weight of y[i]. SortWeightedLabeled can be used to sort classes
// together with weights by the rank variable, i+1.
//
// The returned ntp values can be interpreted as the number of true positives
// where values above the given rank are assigned class true for each given
// rank from 1 to len(classes).
//
//	ntp_i = sum_{j ≥ len(ntp)-1 - i} [ classes_j ] * weights_j, where [x] = 1 if x else 0.
//
// The values of min and max provide the minimum and maximum possible number
// of false values for the set of classes. The first elements of ntp, min and
// max are always zero, corresponding to assigning every datum class false,
// and the last elements are always the weighted sum of classes, corresponding
// to assigning every datum class true. For len(classes) != 0, the lengths of
// min, ntp and max are len(classes)+1.
//
// If weights is nil, all weights are treated as 1. When weights are not nil,
// the calculation of min and max allows for partial assignment of single data
// points. If weights is not nil it must have the same length as classes,
// otherwise TOC will panic.
//
// More details about TOC curves are available at
// https://en.wikipedia.org/wiki/Total_operating_characteristic
func TOC(classes []bool, weights []float64) (min, ntp, max []float64) {
	if weights != nil && len(classes) != len(weights) {
		panic("stat: slice length mismatch")
	}
	n := len(classes)
	if n == 0 {
		return nil, nil, nil
	}
	ntp = make([]float64, n+1)
	min = make([]float64, n+1)
	max = make([]float64, n+1)

	if weights == nil {
		// Unweighted: ntp[i] counts the true labels among the i
		// highest-ranked data.
		for i := 1; i <= n; i++ {
			ntp[i] = ntp[i-1]
			if classes[n-i] {
				ntp[i]++
			}
		}
		pos := ntp[n]
		for i := 0; i <= n; i++ {
			min[i] = math.Max(0, pos-float64(n-i))
			max[i] = math.Min(pos, float64(i))
		}
		return min, ntp, max
	}

	// Weighted: accumulate the true-positive weight and the total weight
	// from the highest rank down. max doubles as cumulative-weight
	// scratch space; its final values are written in the last loop.
	cum := max
	for i := 1; i <= n; i++ {
		w := weights[n-i]
		ntp[i] = ntp[i-1]
		cum[i] = cum[i-1] + w
		if classes[n-i] {
			ntp[i] += w
		}
	}
	tot := cum[n]
	pos := ntp[n]
	for i := 0; i <= n; i++ {
		min[i] = math.Max(0, pos-(tot-cum[i]))
		max[i] = math.Min(pos, cum[i])
	}
	return min, ntp, max
}

1400
vendor/gonum.org/v1/gonum/stat/stat.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

142
vendor/gonum.org/v1/gonum/stat/statmat.go generated vendored Normal file
View File

@@ -0,0 +1,142 @@
// Copyright ©2014 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package stat
import (
"math"
"gonum.org/v1/gonum/floats"
"gonum.org/v1/gonum/mat"
)
// CovarianceMatrix calculates the covariance matrix (also known as the
// variance-covariance matrix) calculated from a matrix of data, x, using
// a two-pass algorithm. The result is stored in dst.
//
// If weights is not nil the weighted covariance of x is calculated. weights
// must have length equal to the number of rows in input data matrix and
// must not contain negative elements.
// The dst matrix must either be empty or have the same number of
// columns as the input data matrix.
func CovarianceMatrix(dst *mat.SymDense, x mat.Matrix, weights []float64) {
	// Matrix form of the two-pass algorithm: center the data, then take
	// a scaled symmetric outer product. Unlike the Covariance function,
	// no compensated summation is applied during centering.
	r, c := x.Dims()
	if dst.IsEmpty() {
		*dst = *(dst.GrowSym(c).(*mat.SymDense))
	} else if dst.SymmetricDim() != c {
		panic(mat.ErrShape)
	}

	// Work on the transpose so each variable is a contiguous row.
	var centered mat.Dense
	centered.CloneFrom(x.T())
	for i := 0; i < c; i++ {
		row := centered.RawRowView(i)
		// Mean panics with ErrShape when len(weights) != len(row),
		// so the weight length needs no separate validation.
		floats.AddConst(-Mean(row, weights), row)
	}

	if weights == nil {
		// Unweighted: normalize by the sample size less one.
		dst.SymOuterK(1/(float64(r)-1), &centered)
		return
	}

	// Scale each observation by sqrt(w) so the outer product applies
	// the weight symmetrically.
	root := make([]float64, r)
	for i, w := range weights {
		if w < 0 {
			panic("stat: negative covariance matrix weights")
		}
		root[i] = math.Sqrt(w)
	}
	for i := 0; i < c; i++ {
		floats.Mul(centered.RawRowView(i), root)
	}
	// Normalize by the weighted sample size less one.
	dst.SymOuterK(1/(floats.Sum(weights)-1), &centered)
}
// CorrelationMatrix returns the correlation matrix calculated from a matrix
// of data, x, using a two-pass algorithm. The result is stored in dst.
//
// If weights is not nil the weighted correlation of x is calculated. weights
// must have length equal to the number of rows in input data matrix and
// must not contain negative elements.
// The dst matrix must either be empty or have the same number of
// columns as the input data matrix.
func CorrelationMatrix(dst *mat.SymDense, x mat.Matrix, weights []float64) {
	// A correlation matrix is a covariance matrix with all variances
	// scaled to one, so compute the covariance and normalize in place.
	// CovarianceMatrix panics on any size or weight-length mismatch.
	CovarianceMatrix(dst, x, weights)
	covToCorr(dst)
}
// covToCorr converts a covariance matrix to a correlation matrix in place,
// dividing each entry by the standard deviations of its row and column.
func covToCorr(c *mat.SymDense) {
	n := c.SymmetricDim()
	// Reciprocal standard deviation of each variable.
	inv := make([]float64, n)
	for i := range inv {
		inv[i] = 1 / math.Sqrt(c.At(i, i))
	}
	for i := 0; i < n; i++ {
		// Force the diagonal to be exactly one.
		c.SetSym(i, i, 1)
		for j := i + 1; j < n; j++ {
			c.SetSym(i, j, c.At(i, j)*inv[i]*inv[j])
		}
	}
}
// corrToCov converts a correlation matrix to a covariance matrix in place.
// The input sigma should be a vector of standard deviations corresponding
// to the covariance. It will panic if len(sigma) is not equal to the
// number of rows in the correlation matrix.
func corrToCov(c *mat.SymDense, sigma []float64) {
	n, _ := c.Dims()
	if n != len(sigma) {
		panic(mat.ErrShape)
	}
	for i := 0; i < n; i++ {
		si := sigma[i]
		// The diagonal becomes exactly the variance.
		c.SetSym(i, i, si*si)
		for j := i + 1; j < n; j++ {
			c.SetSym(i, j, c.At(i, j)*si*sigma[j])
		}
	}
}
// Mahalanobis computes the Mahalanobis distance
//
//	D = sqrt((x-y)ᵀ * Σ^-1 * (x-y))
//
// between the column vectors x and y given the cholesky decomposition of Σ.
// Mahalanobis returns NaN if the linear solve fails.
//
// See https://en.wikipedia.org/wiki/Mahalanobis_distance for more information.
func Mahalanobis(x, y mat.Vector, chol *mat.Cholesky) float64 {
	var delta mat.VecDense
	delta.SubVec(x, y)
	// Solve Σ * solved = delta instead of forming Σ^-1 explicitly.
	var solved mat.VecDense
	if err := chol.SolveVecTo(&solved, &delta); err != nil {
		return math.NaN()
	}
	return math.Sqrt(mat.Dot(&solved, &delta))
}