406 changes: 240 additions & 166 deletions AGENTS.md

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions README.md
@@ -45,11 +45,11 @@ val t = Tensor(
 )
 
 // Function to normalize a single feature vector
-def normalize(x: Tensor1[Feature, Float]) : Tensor1[Feature, Float] =
+def normalize(x: Tensor1[Feature, Float32]) : Tensor1[Feature, Float32] =
   (x -! x.mean) /! x.std
 
 // Apply the normalization function across the Batch dimension
-val normalized: Tensor2[Batch, Feature, Float] =
+val normalized: Tensor2[Batch, Feature, Float32] =
   t.vmap(Axis[Batch])(normalize)
 ```

13 changes: 7 additions & 6 deletions core/src/main/scala/dimwit/autodiff/Autodiff.scala
@@ -7,6 +7,7 @@ import dimwit.tensor.TupleHelpers.PrimeConcatType
 import dimwit.jax.Jax
 import me.shadaj.scalapy.py
 import dimwit.tensor.Label
+import dimwit.tensor.TensorOps.IsFloating

object Autodiff:

@@ -22,10 +23,10 @@ object Autodiff:
     case Tensor[inS, v2] => Tensor[PrimeConcatType[OutShape, inS], V]
 
   // TODO replace with TupledFunction when available (no longer experimental)
-  def grad[T1, T2, V](f: (T1, T2) => Tensor0[V])(using t1Tree: TensorTree[T1], t2Tree: TensorTree[T2], outTree: TensorTree[Tensor0[V]]): (T1, T2) => Grad[(T1, T2)] = (t1, t2) => grad(f.tupled)((t1, t2))
-  def grad[T1, T2, T3, V](f: (T1, T2, T3) => Tensor0[V])(using t1Tree: TensorTree[T1], t2Tree: TensorTree[T2], t3Tree: TensorTree[T3], outTree: TensorTree[Tensor0[V]]): (T1, T2, T3) => Grad[(T1, T2, T3)] = (t1, t2, t3) => grad(f.tupled)((t1, t2, t3))
+  def grad[T1, T2, V: IsFloating](f: (T1, T2) => Tensor0[V])(using t1Tree: TensorTree[T1], t2Tree: TensorTree[T2], outTree: TensorTree[Tensor0[V]]): (T1, T2) => Grad[(T1, T2)] = (t1, t2) => grad(f.tupled)((t1, t2))
+  def grad[T1, T2, T3, V: IsFloating](f: (T1, T2, T3) => Tensor0[V])(using t1Tree: TensorTree[T1], t2Tree: TensorTree[T2], t3Tree: TensorTree[T3], outTree: TensorTree[Tensor0[V]]): (T1, T2, T3) => Grad[(T1, T2, T3)] = (t1, t2, t3) => grad(f.tupled)((t1, t2, t3))
 
-  def grad[Input, V](f: Input => Tensor0[V])(using
+  def grad[Input, V: IsFloating](f: Input => Tensor0[V])(using
     inTree: TensorTree[Input],
     outTree: TensorTree[Tensor0[V]]
   ): Input => Grad[Input] =
@@ -42,10 +43,10 @@ object Autodiff:
     val pyGrad = gpy(pyParams)
     Grad(inTree.fromPyTree(pyGrad).asInstanceOf[Input])
 
-  def valueAndGrad[T1, T2, V](f: (T1, T2) => Tensor0[V])(using t1Tree: TensorTree[T1], t2Tree: TensorTree[T2], outTree: TensorTree[Tensor0[V]]): (T1, T2) => (Tensor0[V], Grad[(T1, T2)]) = (t1, t2) => valueAndGrad(f.tupled)((t1, t2))
-  def valueAndGrad[T1, T2, T3, V](f: (T1, T2, T3) => Tensor0[V])(using t1Tree: TensorTree[T1], t2Tree: TensorTree[T2], t3Tree: TensorTree[T3], outTree: TensorTree[Tensor0[V]]): (T1, T2, T3) => (Tensor0[V], Grad[(T1, T2, T3)]) = (t1, t2, t3) => valueAndGrad(f.tupled)((t1, t2, t3))
+  def valueAndGrad[T1, T2, V: IsFloating](f: (T1, T2) => Tensor0[V])(using t1Tree: TensorTree[T1], t2Tree: TensorTree[T2], outTree: TensorTree[Tensor0[V]]): (T1, T2) => (Tensor0[V], Grad[(T1, T2)]) = (t1, t2) => valueAndGrad(f.tupled)((t1, t2))
+  def valueAndGrad[T1, T2, T3, V: IsFloating](f: (T1, T2, T3) => Tensor0[V])(using t1Tree: TensorTree[T1], t2Tree: TensorTree[T2], t3Tree: TensorTree[T3], outTree: TensorTree[Tensor0[V]]): (T1, T2, T3) => (Tensor0[V], Grad[(T1, T2, T3)]) = (t1, t2, t3) => valueAndGrad(f.tupled)((t1, t2, t3))
 
-  def valueAndGrad[Input, V](f: Input => Tensor0[V])(using
+  def valueAndGrad[Input, V: IsFloating](f: Input => Tensor0[V])(using
     inTree: TensorTree[Input],
     outTree: TensorTree[Tensor0[V]]
   ): Input => (Tensor0[V], Grad[Input]) =
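Reviewer note: a minimal sketch of what the new `V: IsFloating` bound means at a call site. `Params`, the `Feature` label, and the loss are illustrative, and it assumes `TensorTree` instances for the case class can be derived or summoned; none of this is part of the diff.

```scala
import dimwit.*
import dimwit.autodiff.{Autodiff, Grad, TensorTree}

// Illustrative parameter structure (not part of this diff); assumes a Label
// for Feature and a TensorTree[Params] instance are in scope.
case class Params(w: Tensor1[Feature, Float32], b: Tensor0[Float32])

// Stand-in scalar objective; *! is the tensor-op-scalar broadcast seen in the README.
def loss(p: Params): Tensor0[Float32] = (p.w *! p.b).mean

// Compiles: Float32 satisfies the new IsFloating bound.
val gradFn: Params => Grad[Params] = Autodiff.grad(loss)

// Rejected at compile time after this PR:
// def intLoss(p: Params): Tensor0[Int32] = ???
// Autodiff.grad(intLoss)   // error: no given instance of IsFloating[Int32]
```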
112 changes: 68 additions & 44 deletions core/src/main/scala/dimwit/autodiff/FloatTree.scala
@@ -6,73 +6,97 @@ import scala.deriving.*
 import scala.compiletime.*
 import scala.util.NotGiven
 
-/** A marker trait for structures that are trees of Float tensors.
+/** A marker trait for structures that are trees of floating-point tensors.
  * The given instances give evidence that the tensors are
- * really of type float
+ * of type V, constrained by IsFloating.
  */
-trait FloatTree[P]
+trait FloatTree[P, V]
 
 object FloatTree:
 
-  given [Q <: Tuple]: FloatTree[Tensor[Q, Float]] with {}
+  // 1. Base case for Tensors
+  given [Q <: Tuple, V: IsFloating]: FloatTree[Tensor[Q, V], V] with {}
 
-  given listInstance[A](using FloatTree[A]): FloatTree[List[A]] with {}
+  // 2. Inductive base cases for Tuples
+  // This allows the compiler to step through the case class fields and lock in V.
+  given emptyTuple[V]: FloatTree[EmptyTuple, V] with {}
 
-  given mapInstance[K, A](using FloatTree[A]): FloatTree[Map[K, A]] with {}
+  given consTuple[H, T <: Tuple, V](using
+    h: FloatTree[H, V],
+    t: FloatTree[T, V]
+  ): FloatTree[H *: T, V] with {}
 
-  inline given derived[P <: Product](using m: Mirror.ProductOf[P]): FloatTree[P] =
-    summonAll[Tuple.Map[m.MirroredElemTypes, FloatTree]]
-    FloatTreeImpl[P]()
-  class FloatTreeImpl[P] extends FloatTree[P]
+  // 3. Standard collections
+  given listInstance[A, V](using FloatTree[A, V]): FloatTree[List[A], V] with {}
 
-extension [P](p: P)(using tt: TensorTree[P], af: FloatTree[P])
-  /** Maps a function over the TensorTree, as for a regula rtensor tree,
-   * but provides knowledge that tensors are of type float
+  given mapInstance[K, A, V](using FloatTree[A, V]): FloatTree[Map[K, A], V] with {}
+
+  inline given derived[P <: Product, V](using
+    evNotTuple: NotGiven[P <:< Tuple],
+    m: Mirror.ProductOf[P],
+    evElems: FloatTree[m.MirroredElemTypes, V]
+  ): FloatTree[P, V] =
+    FloatTreeImpl[P, V]()
+
+  class FloatTreeImpl[P, V] extends FloatTree[P, V]
+
+extension [P, V](p: P)(using tt: TensorTree[P], ft: FloatTree[P, V], isF: IsFloating[V])
+  /** Maps a function over the TensorTree, as for a regular tensor tree,
+   * but provides knowledge that tensors are of type V
    */
-  def map(f: [T <: Tuple] => Labels[T] ?=> (Tensor[T, Float] => Tensor[T, Float])): P =
-    tt.map(p, [T <: Tuple, V] => (n: Labels[T]) ?=> (t: Tensor[T, V]) => f[T](using n)(t.asInstanceOf[Tensor[T, Float]]).asInstanceOf[Tensor[T, V]])
+  def map[NewV](f: [T <: Tuple] => Labels[T] ?=> (Tensor[T, V] => Tensor[T, NewV])): P =
+    tt.map(p, [T <: Tuple, V0] => (n: Labels[T]) ?=> (t: Tensor[T, V0]) => f[T](using n)(t.asInstanceOf[Tensor[T, V]]).asInstanceOf[Tensor[T, V0]])
 
   /** Zipmaps a function over the TensorTree, as for tensor tree,
-   * but provides knowledge that tensors are of type float
+   * but provides knowledge that tensors are of type V
    */
-  def zipMap(p2: P, f: [T <: Tuple] => Labels[T] ?=> ((Tensor[T, Float], Tensor[T, Float]) => Tensor[T, Float])): P =
+  def zipMap(p2: P, f: [T <: Tuple] => Labels[T] ?=> ((Tensor[T, V], Tensor[T, V]) => Tensor[T, V])): P =
     tt.zipMap(
       p,
       p2,
-      [T <: Tuple, V] => (n: Labels[T]) ?=> (t1: Tensor[T, V], t2: Tensor[T, V]) => f[T](using n)(t1.asInstanceOf[Tensor[T, Float]], t2.asInstanceOf[Tensor[T, Float]]).asInstanceOf[Tensor[T, V]]
+      [T <: Tuple, V0] => (n: Labels[T]) ?=> (t1: Tensor[T, V0], t2: Tensor[T, V0]) => f[T](using n)(t1.asInstanceOf[Tensor[T, V]], t2.asInstanceOf[Tensor[T, V]]).asInstanceOf[Tensor[T, V0]]
     )
 
-/** Arithmetic and math operations for tensor trees of floats.
+/** Arithmetic and math operations for tensor trees of floating-point types.
  */
 object ops:
 
   // helper typeclass
-  trait IsFloatTensor[P]
-  object IsFloatTensor:
-    given [T <: Tuple]: IsFloatTensor[Tensor[T, Float]] with {}
+  trait IsFloatingTensor[P, V]
+  object IsFloatingTensor:
+    given [T <: Tuple, V: IsFloating]: IsFloatingTensor[Tensor[T, V], V] with {}
 
   // Scalar broadcast extensions (Tensor0 op Tree)
-  extension (p2: Tensor0[Float])
-    def ++![P: TensorTree: FloatTree](p1: P): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, Float]) => a +! p2)
-    def --![P: TensorTree: FloatTree](p1: P): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, Float]) => a -! p2)
-    def **![P: TensorTree: FloatTree](p1: P): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, Float]) => a *! p2)
-    def `//!`[P: TensorTree: FloatTree](p1: P): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, Float]) => a /! p2)
+  extension [V: IsFloating](p2: Tensor0[V])
+    def ++![P](p1: P)(using TensorTree[P], FloatTree[P, V]): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, V]) => a +! p2)
+    def --![P](p1: P)(using TensorTree[P], FloatTree[P, V]): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, V]) => a -! p2)
+    def **![P](p1: P)(using TensorTree[P], FloatTree[P, V]): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, V]) => a *! p2)
+    def `//!`[P](p1: P)(using TensorTree[P], FloatTree[P, V]): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, V]) => a /! p2)
 
   // Tree extensions (Tree op Tree, Tree op Scalar, and math ops)
-  // Excluded for bare Tensor[T, Float] to avoid conflicts with tensor's own operators
-  extension [P](p1: P)(using tt: TensorTree[P], af: FloatTree[P], ev: NotGiven[IsFloatTensor[P]])
-    def ++(p2: P): P = p1.zipMap(p2, [T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, Float], b: Tensor[T, Float]) => a + b)
-    def ++!(p2: Tensor0[Float]): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, Float]) => a +! p2)
-    def --(p2: P): P = p1.zipMap(p2, [T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, Float], b: Tensor[T, Float]) => a - b)
-    def --!(p2: Tensor0[Float]): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, Float]) => a -! p2)
-    def **(p2: P): P = p1.zipMap(p2, [T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, Float], b: Tensor[T, Float]) => a * b)
-    def **!(p2: Tensor0[Float]): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, Float]) => a *! p2)
-    def `//`(p2: P): P = p1.zipMap(p2, [T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, Float], b: Tensor[T, Float]) => a / b)
-    def `//!`(p2: Tensor0[Float]): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, Float]) => a /! p2)
-
-    def sqrt: P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, Float]) => TensorOps.sqrt(a))
-    def pow(exponent: Tensor0[Float]): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, Float]) => TensorOps.pow(a)(exponent))
-    def scale(scalar: Tensor0[Float]): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, Float]) => TensorOps.scale(a)(scalar))
-    def sign: P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, Float]) => TensorOps.sign(a))
-
-    def fillCopy(value: Float): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, Float]) => Tensor(a.shape).fill(value))
+  // Excluded for bare Tensor[T, V] to avoid conflicts with tensor's own operators
+  extension [P, V](p1: P)(using tt: TensorTree[P], ft: FloatTree[P, V], isF: IsFloating[V], ev: NotGiven[IsFloatingTensor[P, V]])
+    def ++(p2: P): P = p1.zipMap(p2, [T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, V], b: Tensor[T, V]) => a + b)
+    def ++!(p2: Tensor0[V]): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, V]) => a +! p2)
+    def --(p2: P): P = p1.zipMap(p2, [T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, V], b: Tensor[T, V]) => a - b)
+    def --!(p2: Tensor0[V]): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, V]) => a -! p2)
+    def **(p2: P): P = p1.zipMap(p2, [T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, V], b: Tensor[T, V]) => a * b)
+    def **!(p2: Tensor0[V]): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, V]) => a *! p2)
+    def `//`(p2: P): P = p1.zipMap(p2, [T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, V], b: Tensor[T, V]) => a / b)
+    def `//!`(p2: Tensor0[V]): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, V]) => a /! p2)
+
+    def sqrt: P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, V]) => TensorOps.sqrt(a))
+
+    def pow(exponent: Float): P = pow(Tensor0(VType[V])(exponent))
+    def pow(exponent: Tensor0[V]): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, V]) => TensorOps.pow(a)(exponent))
+    def scale(scalar: Tensor0[V]): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, V]) => TensorOps.scale(a)(scalar))
+    def sign: P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, V]) => TensorOps.sign(a))
+
+    def fillCopy(value: Float): P = p1.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, V]) => Tensor(a.shape, VType[V]).fill(value))
+
+  extension [F[_], V](p: F[V])(using tt: TensorTree[F[V]], ft: FloatTree[F[V], V], isF: IsFloating[V])
+
+    def asFloats[NewV: IsFloating](vtype: VType[NewV])(using m: Mirror.ProductOf[F[NewV]]): F[NewV] =
+      p.map([T <: Tuple] => (n: Labels[T]) ?=> (a: Tensor[T, V]) => a.asFloat(vtype)).asInstanceOf[F[NewV]]
+
+type FloatTreeFor[V] = [P] =>> FloatTree[P, V]
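To make the new derivation concrete: `derived` now threads a single element type `V` through every field of a case class via `consTuple`/`emptyTuple`, instead of summoning an untyped witness per field. A sketch with an illustrative structure; the `MLP` class, its labels, and the `TensorTree` instance are assumptions, not part of this diff:

```scala
import dimwit.*
import dimwit.FloatTree.ops.*   // path as referenced from Grad.scala in this PR

// Illustrative two-layer parameter tree sharing one element type V
// (In/Hidden/Out labels and a TensorTree instance are assumed).
case class MLP[V](w1: Tensor2[In, Hidden, V], w2: Tensor2[Hidden, Out, V])

// derived walks MirroredElemTypes = (Tensor2[In, Hidden, V], Tensor2[Hidden, Out, V])
// via consTuple/consTuple/emptyTuple, locking the same V into every field:
//   summon[FloatTree[MLP[Float32], Float32]]   // resolves
//   summon[FloatTree[MLP[Int32], Int32]]       // fails: no IsFloating[Int32]

// With that evidence in scope, the tree-wide ops compose, e.g. an SGD-style
// step built from `--` (tree op tree) and `**!` (tree op scalar):
def sgdStep(params: MLP[Float32], grads: MLP[Float32], lr: Tensor0[Float32]): MLP[Float32] =
  params -- (grads **! lr)
```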
12 changes: 11 additions & 1 deletion core/src/main/scala/dimwit/autodiff/Grad.scala
@@ -2,6 +2,7 @@ package dimwit.autodiff
 
 import dimwit.*
 import dimwit.jax.Jax
+import scala.deriving.Mirror
 
 /** Type-level tag marking a parameter structure as gradients.
  *
@@ -36,4 +37,13 @@ object Grad:
     def fromPyTree(pyVal: Jax.PyAny): Grad[T] = Grad(ev.fromPyTree(pyVal))
 
   // FloatTree witness for gradient math (++, --, scale, etc.)
-  given [T](using FloatTree[T]): FloatTree[Grad[T]] with {}
+  // given [T, V: IsFloating](using FloatTree[T, V]): FloatTree[Grad[T], V] with {}
+
+  // Bridge extension so we can call .asFloats directly on Grad[Params[V]]
+  extension [F[_], V](g: Grad[F[V]])(using
+    tt: TensorTree[F[V]],
+    ft: FloatTree[F[V], V],
+    isF: IsFloating[V]
+  )
+    def asFloats[NewV: IsFloating](vtype: VType[NewV])(using m: Mirror.ProductOf[F[NewV]]): Grad[F[NewV]] =
+      Grad(dimwit.FloatTree.ops.asFloats(g.value)(vtype))
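A sketch of the bridge in use: converting a whole gradient tree to another floating width before an update. `Params[_]` is illustrative, and it assumes `IsFloating` evidence exists for the half-precision types this PR wires up, plus the usual `TensorTree`/`FloatTree`/`Mirror` instances for the case class:

```scala
import dimwit.*
import dimwit.autodiff.Grad

// Illustrative: a parameter tree polymorphic in its element type.
case class Params[V](w: Tensor1[Feature, V], b: Tensor0[V])

// Widen gradients accumulated in BFloat16 to Float32 before an update;
// the bridge simply forwards to dimwit.FloatTree.ops.asFloats on g.value.
// Assumes IsFloating[BFloat16] and the required tree evidence are in scope.
def widen(g: Grad[Params[BFloat16]]): Grad[Params[Float32]] =
  g.asFloats(VType[Float32])
```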
4 changes: 4 additions & 0 deletions core/src/main/scala/dimwit/jax/JaxDType.scala
@@ -19,6 +19,8 @@ object JaxDType:
     try
       val dtypeStr = jaxDtype.name.as[String]
       dtypeStr match
+        case "bfloat16" => DType.BFloat16
+        case "float16" => DType.Float16
         case "float32" => DType.Float32
         case "float64" => DType.Float64
         case "int32" => DType.Int32
@@ -43,6 +45,8 @@ object JaxDType:
     try
       val jnp = Jax.jnp
       dtype match
+        case DType.BFloat16 => jnp.bfloat16
+        case DType.Float16 => jnp.float16
         case DType.Float32 => jnp.float32
         case DType.Float64 => jnp.float64
         case DType.Int32 => jnp.int32
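With these two cases the visible dtype tables cover JAX's half-precision types in both directions: `"bfloat16"`/`"float16"` strings map in, `jnp.bfloat16`/`jnp.float16` handles map out. A trivial sketch over the enum members this PR also exports from `package.scala` below:

```scala
import dimwit.*

// Purely illustrative helper, not part of the diff.
def isHalfPrecision(d: DType): Boolean = d match
  case DType.BFloat16 | DType.Float16 => true
  case _                              => false
```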
7 changes: 4 additions & 3 deletions core/src/main/scala/dimwit/package.scala
@@ -61,10 +61,10 @@ package object dimwit:
   export dimwit.tensor.{Tensor, Tensor0, Tensor1, Tensor2, Tensor3}
   export dimwit.tensor.{Shape, Shape0, Shape1, Shape2, Shape3}
   export dimwit.tensor.DType
+  export dimwit.tensor.DType.{BFloat16, Float16, Float32, Float64, Int8, Int16, Int32, Int64, UInt8, UInt16, UInt32, Bool}
 
   export dimwit.tensor.{
     VType,
-    ExecutionType,
-    ExecutionTypeFor,
     Label,
     Labels,
     Axis,
@@ -89,7 +89,7 @@ package object dimwit:
   export dimwit.jax.EagerCleanup.eagerCleanup
 
   object Conversions:
-    export dimwit.tensor.Tensor0.{float2FloatTensor, int2IntTensor, int2FloatTensor, boolean2BooleanTensor}
+    export dimwit.tensor.Tensor0.{boolean2BooleanTensor, byte2IntegerTensor, short2IntegerTensor, int2IntegerTensor, long2IntegerTensor, float2FloatingTensor, int2FloatingTensor, double2FloatingTensor}
 
   // Export random object
   export dimwit.random.Random
   export dimwit.random.Random.Key
3 changes: 2 additions & 1 deletion core/src/main/scala/dimwit/random/Random.scala
@@ -1,6 +1,7 @@
 package dimwit.random
 
 import dimwit.tensor.*
+import dimwit.tensor.DType.Int32
 import dimwit.tensor.TensorOps.*
 import dimwit.jax.{Jax, JaxDType}
 import dimwit.autodiff.TensorTree
@@ -83,7 +84,7 @@ object Random:
    * @return
    *   A 1D tensor containing a random permutation of [0, 1, ..., n-1]
    */
-  def permutation[L: Label](dim: AxisExtent[L])(key: Key): Tensor1[L, Int] =
+  def permutation[L: Label](dim: AxisExtent[L])(key: Key): Tensor1[L, Int32] =
     liftPyTensor(Jax.jrandom.permutation(key.jaxKey, dim.size))
 
   object Key:
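The only change in surface here is the sharpened return type: permutation indices are now explicitly `Int32` rather than the old `Int` alias. A sketch using nothing beyond the signature above; construction of `AxisExtent` and `Key` is elided:

```scala
import dimwit.*
import dimwit.random.Random

// Thin wrapper pinning the new index type; Label context bound as in the diff.
def shuffledIndices[L: Label](dim: AxisExtent[L], key: Random.Key): Tensor1[L, Int32] =
  Random.permutation(dim)(key)
```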
24 changes: 12 additions & 12 deletions core/src/main/scala/dimwit/stats/Distributions.scala
@@ -7,32 +7,32 @@ import dimwit.jax.Jax.scipy_stats as jstats
 import dimwit.jax.Jax.PyDynamic
 import dimwit.tensor.TensorOps
 
-opaque type LogProb = Float
-opaque type Prob = Float
+opaque type LogProb = Float32
+opaque type Prob = Float32
 
 object LogProb:
 
-  given IsFloating[LogProb] = summon[IsFloating[Float]]
+  given IsFloating[LogProb] = summon[IsFloating[Float32]]
 
-  def apply[T <: Tuple: Labels](t: Tensor[T, Float]): Tensor[T, LogProb] = t
+  def apply[T <: Tuple: Labels](t: Tensor[T, Float32]): Tensor[T, LogProb] = t
 
   extension [T <: Tuple: Labels](t: Tensor[T, LogProb])
 
     def exp: Tensor[T, Prob] = TensorOps.exp(t)
-    def log: Tensor[T, Float] = TensorOps.log(t) // Lose LogProb if we log again
-    def asFloat: Tensor[T, Float] = t
+    def log: Tensor[T, Float32] = TensorOps.log(t) // Lose LogProb if we log again
+    def asFloat: Tensor[T, Float32] = t
 
 object Prob:
 
-  given IsFloating[Prob] = summon[IsFloating[Float]]
+  given IsFloating[Prob] = summon[IsFloating[Float32]]
 
-  def apply[T <: Tuple: Labels](t: Tensor[T, Float]): Tensor[T, Prob] = t
+  def apply[T <: Tuple: Labels](t: Tensor[T, Float32]): Tensor[T, Prob] = t
 
   extension [T <: Tuple: Labels](t: Tensor[T, Prob])
 
-    def exp: Tensor[T, Float] = TensorOps.exp(t) // Lose Prob if we exp again
+    def exp: Tensor[T, Float32] = TensorOps.exp(t) // Lose Prob if we exp again
     def log: Tensor[T, LogProb] = TensorOps.log(t)
-    def asFloat: Tensor[T, Float] = t
+    def asFloat: Tensor[T, Float32] = t
 
 trait Distribution[EventShape <: Tuple: Labels, V]:

@@ -58,7 +58,7 @@ trait Distribution[EventShape <: Tuple: Labels, V]:
   * @tparam EventShape Shape of the tensor of independent values
   * @tparam V Value type
   */
-trait IndependentDistribution[EventShape <: Tuple: Labels, V: ExecutionType] extends Distribution[EventShape, V]:
+trait IndependentDistribution[EventShape <: Tuple: Labels, V] extends Distribution[EventShape, V]:
 
   /** Element-wise log probabilities (primitive operation) */
   def elementWiseLogProb(x: Tensor[EventShape, V]): Tensor[EventShape, LogProb]
@@ -78,7 +78,7 @@ object IndependentDistribution:
   * Each element of the resulting tensor is an independent sample from
   * the same univariate distribution.
   */
-  def fromUnivariate[EventShape <: Tuple: Labels, V: ExecutionType](
+  def fromUnivariate[EventShape <: Tuple: Labels, V](
     shape: Shape[EventShape],
     univariate: UnivariateDistribution[V]
   ): IndependentDistribution[EventShape, V] =
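A short sketch of the opaque-type round trip after the rename; besides the `Float32` spelling, note that dropping `V: ExecutionType` means distribution implementors no longer carry execution-type evidence. The shape `S`, its `Labels` instance, and the `dimwit.stats` import path are assumed:

```scala
import dimwit.*
import dimwit.stats.*   // assumed export path for LogProb/Prob

def roundTrip[S <: Tuple: Labels](probs: Tensor[S, Float32]): Tensor[S, Float32] =
  val lp: Tensor[S, LogProb] = Prob(probs).log   // Prob -> LogProb
  val back: Tensor[S, Prob]  = lp.exp            // LogProb -> Prob
  lp.log                                         // a second log falls back to plain Float32
```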