| text | id | metadata | __index_level_0__ |
|---|---|---|---|
| string (lengths 17–362k) | string (lengths 13–115) | dict | int64 (0–75) |
# global
from typing import Optional, Tuple
import torch
def unravel_index(
indices: torch.Tensor,
    shape: Tuple[int, ...],
/,
*,
out: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, ...]:
temp = indices.to(torch.int32)
output = []
for dim in reversed(shape):
output.append(temp % dim)
temp = temp // dim
return tuple(reversed(output))
unravel_index.support_native_out = False
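# Illustrative usage sketch (not part of the original module); assumes the
# torch backend is active. A flat index is peeled into per-dimension
# coordinates by repeated modulo / floor-division over the reversed shape:
#   unravel_index(torch.tensor([5]), (2, 3)) -> (tensor([1]), tensor([2]))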
| ivy/ivy/functional/backends/torch/experimental/searching.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/experimental/searching.py",
"repo_id": "ivy",
"token_count": 174
} | 26 |
# global
import torch
from typing import Optional, Literal, Union, List
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from . import backend_version
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, backend_version)
def argsort(
x: torch.Tensor,
/,
*,
axis: int = -1,
descending: bool = False,
stable: bool = True,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if out is not None:
out = (torch.zeros(x.shape, dtype=x.dtype), out.long())
_, sorted_indices = torch.sort(
x, dim=axis, descending=descending, stable=stable, out=out
)
return sorted_indices
argsort.support_native_out = True
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, backend_version)
def sort(
x: torch.Tensor,
/,
*,
axis: int = -1,
descending: bool = False,
stable: bool = True,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if out is not None:
out = (out, torch.zeros(out.shape, dtype=torch.long))
sorted_tensor, _ = torch.sort(
x, dim=axis, descending=descending, stable=stable, out=out
)
return sorted_tensor
sort.support_native_out = True
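# Illustrative usage sketch (not part of the original module):
#   sort(torch.tensor([3.0, 1.0, 2.0]))                  -> tensor([1., 2., 3.])
#   argsort(torch.tensor([3.0, 1.0, 2.0]))               -> tensor([1, 2, 0])
#   sort(torch.tensor([3.0, 1.0, 2.0]), descending=True) -> tensor([3., 2., 1.])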
# msort
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, backend_version)
def msort(
a: Union[torch.Tensor, list, tuple], /, *, out: Optional[torch.Tensor] = None
) -> torch.Tensor:
return torch.msort(a, out=out)
msort.support_native_out = True
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, backend_version)
def searchsorted(
x: torch.Tensor,
v: torch.Tensor,
/,
*,
side: Literal["left", "right"] = "left",
sorter: Optional[Union[torch.Tensor, List[int]]] = None,
ret_dtype: torch.dtype = torch.int64,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
assert ivy.is_int_dtype(ret_dtype), TypeError(
"only Integer data types are supported for ret_dtype."
)
if sorter is not None:
sorter_dtype = ivy.as_native_dtype(sorter.dtype)
assert ivy.is_int_dtype(sorter_dtype), TypeError(
f"Only signed integer data type for sorter is allowed, got {sorter_dtype }."
)
if sorter_dtype is not torch.int64:
sorter = sorter.to(torch.int64)
ret_dtype = ivy.as_native_dtype(ret_dtype)
func_out = out
if ivy.exists(out) and out.dtype != ret_dtype:
func_out = None
if ret_dtype is torch.int64:
return torch.searchsorted(
x,
v,
sorter=sorter,
side=side,
out_int32=False,
out=func_out,
)
elif ret_dtype is torch.int32:
return torch.searchsorted(
x,
v,
sorter=sorter,
side=side,
out_int32=True,
out=func_out,
)
if ivy.exists(out):
return ivy.inplace_update(
out, torch.searchsorted(x, v, sorter=sorter, side=side).to(out.dtype)
)
return torch.searchsorted(x, v, sorter=sorter, side=side).to(ret_dtype)
searchsorted.support_native_out = True
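# Illustrative usage sketch (not part of the original module): `x` must be
# sorted along its last dimension; `side` picks the left- or right-most
# insertion point for ties:
#   xs = torch.tensor([1.0, 3.0, 5.0, 7.0])
#   searchsorted(xs, torch.tensor([5.0]))               -> tensor([2])
#   searchsorted(xs, torch.tensor([5.0]), side="right") -> tensor([3])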
| ivy/ivy/functional/backends/torch/sorting.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/sorting.py",
"repo_id": "ivy",
"token_count": 1426
} | 27 |
# global
# local
import ivy
import ivy.functional.frontends.jax as jax_frontend
from ivy.func_wrapper import with_unsupported_dtypes
class Array:
def __init__(self, array, weak_type=False):
self._ivy_array = array if isinstance(array, ivy.Array) else ivy.array(array)
self.weak_type = weak_type
def __repr__(self):
main = (
str(self.ivy_array.__repr__())
.replace("ivy.array", "ivy.frontends.jax.Array")
.replace(")", "")
+ ", dtype="
+ str(self.ivy_array.dtype)
)
if self.weak_type:
return main + ", weak_type=True)"
return main + ")"
# Properties #
# ---------- #
@property
def ivy_array(self):
return self._ivy_array
@property
def dtype(self):
return self.ivy_array.dtype
@property
def shape(self):
return tuple(self.ivy_array.shape.shape)
@property
def at(self):
return jax_frontend._src.numpy.lax_numpy._IndexUpdateHelper(self.ivy_array)
@property
def T(self):
return self.ivy_array.T
@property
def ndim(self):
return self.ivy_array.ndim
# Instance Methods #
# ---------------- #
def copy(self, order=None):
return jax_frontend.numpy.copy(self._ivy_array, order=order)
def diagonal(self, offset=0, axis1=0, axis2=1):
return jax_frontend.numpy.diagonal(
self._ivy_array, offset=offset, axis1=axis1, axis2=axis2
)
def all(self, *, axis=None, out=None, keepdims=False):
return jax_frontend.numpy.all(
self._ivy_array, axis=axis, keepdims=keepdims, out=out
)
def astype(self, dtype):
try:
return jax_frontend.numpy.asarray(self, dtype=dtype)
except: # noqa: E722
raise ivy.utils.exceptions.IvyException(
f"Dtype {self.dtype} is not castable to {dtype}"
)
@with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle")
def argmax(
self,
/,
*,
axis=None,
out=None,
keepdims=False,
):
return jax_frontend.numpy.argmax(
self,
axis=axis,
out=out,
keepdims=keepdims,
)
@with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle")
def argmin(
self,
/,
*,
axis=None,
out=None,
keepdims=False,
):
return jax_frontend.numpy.argmin(
self,
axis=axis,
out=out,
keepdims=keepdims,
)
def squeeze(self, axis=None):
return jax_frontend.numpy.squeeze(self, axis=axis)
def conj(self, /):
return jax_frontend.numpy.conj(self._ivy_array)
def conjugate(self, /):
return jax_frontend.numpy.conjugate(self._ivy_array)
def mean(self, *, axis=None, dtype=None, out=None, keepdims=False, where=None):
return jax_frontend.numpy.mean(
self._ivy_array,
axis=axis,
dtype=dtype,
out=out,
keepdims=keepdims,
where=where,
)
def cumprod(self, axis=None, dtype=None, out=None):
return jax_frontend.numpy.cumprod(
self,
axis=axis,
dtype=dtype,
out=out,
)
def cumsum(self, axis=None, dtype=None, out=None):
return jax_frontend.numpy.cumsum(
self,
axis=axis,
dtype=dtype,
out=out,
)
def nonzero(self, *, size=None, fill_value=None):
return jax_frontend.numpy.nonzero(
self,
size=size,
fill_value=fill_value,
)
def prod(
self,
axis=None,
dtype=None,
keepdims=False,
initial=None,
where=None,
promote_integers=True,
out=None,
):
return jax_frontend.numpy.product(
self,
axis=axis,
            dtype=dtype,
keepdims=keepdims,
initial=initial,
where=where,
promote_integers=promote_integers,
out=out,
)
def ravel(self, order="C"):
return jax_frontend.numpy.ravel(
self,
order=order,
)
flatten = ravel
def sort(self, axis=-1, order=None):
return jax_frontend.numpy.sort(
self,
axis=axis,
order=order,
)
def sum(
self,
axis=None,
dtype=None,
out=None,
keepdims=False,
initial=None,
where=None,
promote_integers=True,
):
return jax_frontend.numpy.sum(
self,
axis=axis,
dtype=dtype,
out=out,
keepdims=keepdims,
initial=initial,
where=where,
promote_integers=promote_integers,
)
def argsort(self, axis=-1, kind="stable", order=None):
return jax_frontend.numpy.argsort(self, axis=axis, kind=kind, order=order)
def any(self, *, axis=None, out=None, keepdims=False, where=None):
return jax_frontend.numpy.any(
self._ivy_array, axis=axis, keepdims=keepdims, out=out, where=where
)
def reshape(self, *args, order="C"):
if not isinstance(args[0], int):
if len(args) > 1:
raise TypeError(
"Shapes must be 1D sequences of concrete values of integer type,"
f" got {args}."
)
args = args[0]
return jax_frontend.numpy.reshape(self, tuple(args), order)
def __add__(self, other):
return jax_frontend.numpy.add(self, other)
def __radd__(self, other):
return jax_frontend.numpy.add(other, self)
def __sub__(self, other):
return jax_frontend.lax.sub(self, other)
def __rsub__(self, other):
return jax_frontend.lax.sub(other, self)
def __mul__(self, other):
return jax_frontend.lax.mul(self, other)
def __rmul__(self, other):
return jax_frontend.lax.mul(other, self)
def __div__(self, other):
return jax_frontend.numpy.divide(self, other)
def __rdiv__(self, other):
return jax_frontend.numpy.divide(other, self)
def __mod__(self, other):
return jax_frontend.numpy.mod(self, other)
def __rmod__(self, other):
return jax_frontend.numpy.mod(other, self)
def __truediv__(self, other):
return jax_frontend.numpy.divide(self, other)
def __rtruediv__(self, other):
return jax_frontend.numpy.divide(other, self)
def __matmul__(self, other):
return jax_frontend.numpy.dot(self, other)
def __rmatmul__(self, other):
return jax_frontend.numpy.dot(other, self)
def __pos__(self):
return self
def __neg__(self):
return jax_frontend.lax.neg(self)
def __eq__(self, other):
return jax_frontend.lax.eq(self, other)
def __ne__(self, other):
return jax_frontend.lax.ne(self, other)
def __lt__(self, other):
return jax_frontend.lax.lt(self, other)
def __le__(self, other):
return jax_frontend.lax.le(self, other)
def __gt__(self, other):
return jax_frontend.lax.gt(self, other)
def __ge__(self, other):
return jax_frontend.lax.ge(self, other)
def __abs__(self):
return jax_frontend.numpy.abs(self)
def __pow__(self, other):
return jax_frontend.lax.pow(self, other)
def __rpow__(self, other):
other = ivy.asarray(other)
return jax_frontend.lax.pow(other, self)
def __and__(self, other):
return jax_frontend.numpy.bitwise_and(self, other)
def __rand__(self, other):
return jax_frontend.numpy.bitwise_and(other, self)
def __or__(self, other):
return jax_frontend.numpy.bitwise_or(self, other)
def __ror__(self, other):
return jax_frontend.numpy.bitwise_or(other, self)
def __xor__(self, other):
return jax_frontend.lax.bitwise_xor(self, other)
def __rxor__(self, other):
return jax_frontend.lax.bitwise_xor(other, self)
def __invert__(self):
return jax_frontend.lax.bitwise_not(self)
def __lshift__(self, other):
return jax_frontend.lax.shift_left(self, other)
def __rlshift__(self, other):
return jax_frontend.lax.shift_left(other, self)
def __rshift__(self, other):
return jax_frontend.lax.shift_right_logical(self, other)
def __rrshift__(self, other):
return jax_frontend.lax.shift_right_logical(other, self)
def __getitem__(self, idx):
return self.at[idx].get()
def __setitem__(self, idx, val):
raise ivy.utils.exceptions.IvyException(
"ivy.functional.frontends.jax.Array object doesn't support assignment"
)
def __iter__(self):
if self.ndim == 0:
raise TypeError("iteration over a 0-d Array not supported")
for i in range(self.shape[0]):
yield self[i]
def round(self, decimals=0):
return jax_frontend.numpy.round(self, decimals)
def repeat(self, repeats, axis=None, *, total_repeat_length=None):
return jax_frontend.numpy.repeat(self, repeats, axis=axis)
def searchsorted(self, v, side="left", sorter=None, *, method="scan"):
return jax_frontend.numpy.searchsorted(self, v, side=side, sorter=sorter)
def max(
self,
/,
*,
axis=None,
out=None,
keepdims=False,
where=None,
):
return jax_frontend.numpy.max(
self, axis=axis, out=out, keepdims=keepdims, where=where
)
def ptp(self, *, axis=None, out=None, keepdims=False):
return jax_frontend.numpy.ptp(self, axis=axis, keepdims=keepdims)
def min(
self,
/,
*,
axis=None,
out=None,
keepdims=False,
where=None,
):
return jax_frontend.numpy.min(
self, axis=axis, out=out, keepdims=keepdims, where=where
)
def std(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=None
):
return jax_frontend.numpy.std(
self,
axis=axis,
dtype=dtype,
out=out,
ddof=ddof,
keepdims=keepdims,
where=where,
)
def var(
self, *, axis=None, dtype=None, out=None, ddof=False, keepdims=False, where=None
):
return jax_frontend.numpy.var(
self._ivy_array,
axis=axis,
dtype=dtype,
out=out,
ddof=int(ddof),
keepdims=keepdims,
where=where,
)
def swapaxes(self, axis1, axis2):
return jax_frontend.numpy.swapaxes(self, axis1=axis1, axis2=axis2)
# Jax provided `DeviceArray` in versions 0.4.13 and below,
# hence aliasing it here
DeviceArray = Array
| ivy/ivy/functional/frontends/jax/array.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/array.py",
"repo_id": "ivy",
"token_count": 5697
} | 28 |
# global
import inspect
import abc
# local
import ivy
from ivy.functional.frontends.jax.func_wrapper import (
to_ivy_arrays_and_back,
)
from .creation import linspace, arange, array
from .manipulations import transpose, concatenate, expand_dims
class _AxisConcat(abc.ABC):
axis: int
ndmin: int
trans1d: int
def __getitem__(self, key):
key_tup = key if isinstance(key, tuple) else (key,)
params = [self.axis, self.ndmin, self.trans1d, -1]
directive = key_tup[0]
if isinstance(directive, str):
key_tup = key_tup[1:]
# check two special cases: matrix directives
if directive == "r":
params[-1] = 0
elif directive == "c":
params[-1] = 1
else:
vec = directive.split(",")
k = len(vec)
if k < 4:
vec += params[k:]
else:
# ignore everything after the first three comma-separated ints
vec = vec[:3] + [params[-1]]
try:
params = list(map(int, vec))
except ValueError as err:
raise ValueError(
f"could not understand directive {directive!r}"
) from err
axis, ndmin, trans1d, matrix = params
output = []
for item in key_tup:
if isinstance(item, slice):
newobj = _make_1d_grid_from_slice(item)
item_ndim = 0
elif isinstance(item, str):
raise TypeError("string directive must be placed at the beginning")
else:
newobj = array(item, copy=False)
item_ndim = newobj.ndim
newobj = array(newobj, copy=False, ndmin=ndmin)
if trans1d != -1 and ndmin - item_ndim > 0:
shape_obj = tuple(range(ndmin))
# Calculate number of left shifts, with overflow protection by mod
num_lshifts = ndmin - abs(ndmin + trans1d + 1) % ndmin
shape_obj = tuple(shape_obj[num_lshifts:] + shape_obj[:num_lshifts])
newobj = transpose(newobj, shape_obj)
output.append(newobj)
res = concatenate(tuple(output), axis=axis)
if matrix != -1 and res.ndim == 1:
# insert 2nd dim at axis 0 or 1
res = expand_dims(res, matrix)
return res
def __len__(self) -> int:
return 0
class RClass(_AxisConcat):
axis = 0
ndmin = 1
trans1d = -1
class CClass(_AxisConcat):
axis = -1
ndmin = 2
trans1d = 0
# --- Helpers --- #
# --------------- #
def _make_1d_grid_from_slice(s):
step = 1 if s.step is None else s.step
start = 0 if s.start is None else s.start
if s.step is not None and ivy.is_complex_dtype(s.step):
newobj = linspace(start, s.stop, int(abs(step)))
else:
newobj = arange(start, s.stop, step)
return newobj
# --- Main --- #
# ------------ #
@to_ivy_arrays_and_back
def choose(arr, choices, out=None, mode="raise"):
return ivy.choose(arr, choices, out=out, mode=mode)
@to_ivy_arrays_and_back
def diag(v, k=0):
return ivy.diag(v, k=k)
@to_ivy_arrays_and_back
def diag_indices(n, ndim=2):
idx = ivy.arange(n, dtype=int)
return (idx,) * ndim
@to_ivy_arrays_and_back
def diag_indices_from(arr):
n = arr.shape[0]
ndim = ivy.get_num_dims(arr)
if not all(arr.shape[i] == n for i in range(ndim)):
raise ValueError("All dimensions of input must be of equal length")
idx = ivy.arange(n, dtype=int)
return (idx,) * ndim
@to_ivy_arrays_and_back
def diagonal(a, offset=0, axis1=0, axis2=1):
return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)
@to_ivy_arrays_and_back
def indices(dimensions, dtype=int, sparse=False):
if sparse:
return tuple(
ivy.arange(dim)
.expand_dims(
axis=[j for j in range(len(dimensions)) if i != j],
)
.astype(dtype)
for i, dim in enumerate(dimensions)
)
else:
grid = ivy.meshgrid(*[ivy.arange(dim) for dim in dimensions], indexing="ij")
return ivy.stack(grid, axis=0).astype(dtype)
@to_ivy_arrays_and_back
def mask_indices(n, mask_func, k=0):
mask_func_obj = inspect.unwrap(mask_func)
mask_func_name = mask_func_obj.__name__
try:
ivy_mask_func_obj = getattr(ivy.functional.frontends.jax.numpy, mask_func_name)
a = ivy.ones((n, n))
mask = ivy_mask_func_obj(a, k=k)
indices = ivy.argwhere(mask.ivy_array)
return indices[:, 0], indices[:, 1]
    except AttributeError as e:
        raise ivy.utils.exceptions.IvyException(
            f"mask function {mask_func_name!r} is not supported: {e}"
        ) from e
@to_ivy_arrays_and_back
def take_along_axis(arr, indices, axis, mode="fill"):
return ivy.take_along_axis(arr, indices, axis, mode=mode)
@to_ivy_arrays_and_back
def tril_indices(n, k=0, m=None):
return ivy.tril_indices(n, m, k)
@to_ivy_arrays_and_back
def tril_indices_from(arr, k=0):
return ivy.tril_indices(arr.shape[-2], arr.shape[-1], k)
@to_ivy_arrays_and_back
def triu_indices(n, k=0, m=None):
return ivy.triu_indices(n, m, k)
@to_ivy_arrays_and_back
def triu_indices_from(arr, k=0):
return ivy.triu_indices(arr.shape[-2], arr.shape[-1], k)
@to_ivy_arrays_and_back
def unravel_index(indices, shape):
ret = [x.astype(indices.dtype) for x in ivy.unravel_index(indices, shape)]
return tuple(ret)
c_ = CClass()
r_ = RClass()
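# Illustrative usage sketch of the helpers above (they mirror numpy's
# np.r_ / np.c_; not part of the original module):
#   r_[1:4, 0, 4]                        -> [1, 2, 3, 0, 4]   (1-D concat)
#   c_[array([1, 2]), array([3, 4])]     -> [[1, 3],
#                                            [2, 4]]          (column stack)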
| ivy/ivy/functional/frontends/jax/numpy/indexing.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/numpy/indexing.py",
"repo_id": "ivy",
"token_count": 2762
} | 29 |
# local
import ivy
from ivy.func_wrapper import with_supported_dtypes
from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
from ivy.functional.ivy.experimental.layers import _broadcast_pooling_helper
# --- Helpers --- #
# --------------- #
def _conv(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
dims = len(input.shape) - 2
_valid_shapes(input, weight, bias, stride, padding, groups)
if isinstance(padding, str):
padding = padding.upper()
else:
if isinstance(padding, int):
padding = [*[(padding, padding) for _ in range(dims)]]
else:
padding = [*[(p, p) for p in padding]]
ret = ivy.conv(
input,
weight,
stride,
padding,
dims=dims,
data_format="channel_first",
filter_format="channel_first",
dilations=dilation,
feature_group_count=groups,
)
if bias is not None:
return ivy.add(ret, ivy.expand_dims(bias, axis=(0, *range(2, dims + 2))))
return ret
def _valid_shapes(input, weight, bias, stride, padding, groups, transpose=False):
in_channels = input.shape[1]
out_channels = weight.shape[0] if not transpose else weight.shape[1] * groups
ivy.utils.assertions.check_equal(
in_channels % groups,
0,
message="in_channels must be divisible by groups",
as_array=False,
)
ivy.utils.assertions.check_equal(
out_channels % groups,
0,
message="out_channels must be divisible by groups",
as_array=False,
)
if bias is not None:
ivy.utils.assertions.check_equal(
bias.shape[0],
out_channels,
message="bias must be same shape as out_channels",
as_array=False,
)
if padding == "same":
if isinstance(stride, int):
ivy.utils.assertions.check_equal(
stride,
1,
message="padding cannot be 'same' for stride > 1",
as_array=False,
)
else:
for i in stride:
ivy.utils.assertions.check_equal(
i,
1,
message="padding cannot be 'same' for stride > 1",
as_array=False,
)
if not transpose:
in_channels_by_groups = weight.shape[1]
ivy.utils.assertions.check_equal(
in_channels,
in_channels_by_groups * groups,
message="in_channels must be consistent between input and weight",
as_array=False,
)
else:
ivy.utils.assertions.check_equal(
in_channels,
weight.shape[0],
message="in_channels must be consistent between input and weight",
as_array=False,
)
# --- Main --- #
# ------------ #
@with_supported_dtypes(
{"2.0.0 and below": ("float16", "float32", "float64")}, "mindspore"
)
@to_ivy_arrays_and_back
def adaptive_avg_pool2d(input, output_size):
return ivy.adaptive_avg_pool2d(input, output_size, data_format="NCHW")
@to_ivy_arrays_and_back
def avg_pool2d(
input,
kernel_size=1,
stride=1,
padding=0,
ceil_mode=False,
count_include_pad=True,
divisor_override=0,
):
# Figure out input dims N
    input_rank = input.ndim
    if input_rank == 4:
        # NCHW
        data_format = "NCHW"
    else:
        raise ValueError(f"Expected a 4D (NCHW) input, got a {input_rank}D input")
kernel_size = _broadcast_pooling_helper(kernel_size, "2d", name="kernel_size")
stride = _broadcast_pooling_helper(stride, "2d", name="stride")
padding = _broadcast_pooling_helper(padding, "2d", name="padding")
kernel_pads = list(zip(kernel_size, padding))
# Padding should be less than or equal to half of kernel size
if not all(pad <= kernel / 2 for kernel, pad in kernel_pads):
raise ValueError(
"pad should be smaller than or equal to half of kernel size, "
f"but got padding={padding}, kernel_size={kernel_size}. "
)
# Figure out padding string
if all(pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in kernel_pads):
padding_str = "SAME"
else:
padding_str = "VALID"
return ivy.avg_pool2d(
input,
kernel_size,
stride,
padding_str,
data_format=data_format,
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
divisor_override=divisor_override,
)
@with_supported_dtypes({"2.0 and below": ("float16", "float32")}, "mindspore")
@to_ivy_arrays_and_back
def conv1d(
input,
weight,
bias=None,
stride=1,
pad_mode="valid",
padding=0,
dilation=1,
groups=1,
):
if pad_mode in ["valid", "same"]:
padding = pad_mode
elif pad_mode == "pad":
padding = padding
else:
raise NotImplementedError(f"pad_mode {pad_mode} not implemented")
return _conv(input, weight, bias, stride, padding, dilation, groups)
@with_supported_dtypes({"2.0 and below": ("float16", "float32")}, "mindspore")
@to_ivy_arrays_and_back
def conv2d(
input,
weight,
bias=None,
stride=1,
pad_mode="valid",
padding=0,
dilation=1,
groups=1,
):
if pad_mode in ["valid", "same"]:
padding = pad_mode
elif pad_mode == "pad":
padding = padding
else:
raise NotImplementedError(f"pad_mode {pad_mode} not implemented")
return _conv(input, weight, bias, stride, padding, dilation, groups)
@with_supported_dtypes({"2.0 and below": ("float16", "float32")}, "mindspore")
@to_ivy_arrays_and_back
def conv3d(
input,
weight,
bias=None,
stride=1,
pad_mode="valid",
padding=0,
dilation=1,
groups=1,
):
if pad_mode in ["valid", "same"]:
padding = pad_mode
elif pad_mode == "pad":
padding = padding
else:
raise NotImplementedError(f"pad_mode {pad_mode} not implemented")
return _conv(input, weight, bias, stride, padding, dilation, groups)
@with_supported_dtypes(
{
"2.0.0 and below": (
"int8",
"int16",
"int32",
"int64",
"float16",
"float32",
"float64",
)
},
"mindspore",
)
@to_ivy_arrays_and_back
def dropout2d(input, p=0.5, training=True):
return ivy.dropout2d(input, p, training=training, data_format="NCHW")
@with_supported_dtypes(
{
"2.0.0 and below": (
"int8",
"int16",
"int32",
"int64",
"float16",
"float32",
"float64",
)
},
"mindspore",
)
@to_ivy_arrays_and_back
def dropout3d(input, p=0.5, training=True):
return ivy.dropout3d(input, p, training=training, data_format="NCDHW")
@with_supported_dtypes(
{"2.0.0 and below": ("float16", "float32", "float64")},
"mindspore",
)
@to_ivy_arrays_and_back
def fast_gelu(input_x):
return (input_x / (1 + ivy.exp(-1.702 * ivy.abs(input_x)))) * ivy.exp(
0.851 * (input_x - ivy.abs(input_x))
)
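# Note (a sketch, not part of the original module): algebraically the
# expression above equals x * sigmoid(1.702 * x), the standard sigmoid
# approximation of GELU; the |x|-based form is used so that exp() is only
# evaluated on non-positive arguments, avoiding overflow for large |x|.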
@to_ivy_arrays_and_back
def flatten(input, order="C", *, start_dim=1, end_dim=-1):
return ivy.flatten(input, order=order, start_dim=start_dim, end_dim=end_dim)
@with_supported_dtypes({"2.0.0 and below": ("float16", "float32")}, "mindspore")
@to_ivy_arrays_and_back
def gumbel_softmax(logits, tau=1, hard=False, dim=-1):
gumbels = -ivy.empty_like(logits).exponential().log()
gumbels = (logits + gumbels) / tau
y_soft = ivy.softmax(gumbels, axis=dim)
if hard:
        indices = ivy.argmax(y_soft, axis=dim, keepdims=True)
y_hard = ivy.zeros_like(logits)
updates = ivy.ones_like(indices)
y_hard = ivy.scatter_nd(indices, updates, reduction="replace", out=y_hard)
ret = y_hard - y_soft.stop_gradient(preserve_type=True) + y_soft
else:
ret = y_soft
return ret
@with_supported_dtypes(
{
"2.0 and below": (
"int8",
"int16",
"int32",
"int64",
"float16",
"float32",
"float64",
)
},
"mindspore",
)
@to_ivy_arrays_and_back
def hardswish(x):
return ivy.hardswish(x)
@with_supported_dtypes(
{
"2.0.0 and below": (
"int8",
"int16",
"int32",
"int64",
"float16",
"float32",
"float64",
)
},
"mindspore",
)
@to_ivy_arrays_and_back
def interpolate(
input,
size=None,
scale_factor=None,
mode="nearest",
align_corners=False,
recompute_scale_factor=False,
):
return ivy.interpolate(
input,
size,
scale_factor=scale_factor,
mode=mode,
align_corners=align_corners,
recompute_scale_factor=recompute_scale_factor,
)
def kl_div(logits, labels, reduction="mean"):
"""Computes the Kullback-Leibler (KL) Divergence between the logits and the
labels.
Parameters
----------
logits (numpy array): The input logits array.
labels (numpy array): The label array which has the same shape as logits.
reduction (str): Specifies the reduction to be applied to the output.
Its value must be one of 'none', 'mean', 'batchmean',
or 'sum'. Default: 'mean'.
Returns
-------
float or numpy array: If reduction is 'none', then output is
a numpy array and has the same shape as logits.
Otherwise, it is a scalar (float).
"""
assert ivy.shape(logits) == ivy.shape(
labels
), "logits and labels must have the same shape."
L = labels * (ivy.log(labels) - logits)
if reduction == "none":
return L
elif reduction == "mean":
return ivy.mean(L)
elif reduction == "batchmean":
return ivy.mean(L, axis=0)
elif reduction == "sum":
return ivy.sum(L)
else:
raise ValueError(
"Invalid reduction mode. Supported values are 'none', 'mean', 'batchmean',"
" or 'sum'."
)
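# Illustrative usage sketch (not part of the original module); assumes
# `logits` already holds log-probabilities, matching the formula above:
#   labels = ivy.array([0.5, 0.5])
#   kl_div(ivy.log(labels), labels, reduction="sum") -> 0.0
#   (identical distributions give zero divergence)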
@with_supported_dtypes({"2.0.0 and below": ("float16", "float32")}, "mindspore")
@to_ivy_arrays_and_back
def log_softmax(input, axis=-1):
    return ivy.log_softmax(input, axis=axis)
@with_supported_dtypes(
{
"2.0.0 and below": (
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
)
},
"mindspore",
)
@to_ivy_arrays_and_back
def max_pool3d(
input,
kernel_size,
stride=None,
padding=0,
dilation=1,
ceil_mode=False,
return_indices=False,
):
    # ToDo: Add return_indices once superset is implemented
if not stride:
stride = kernel_size
data_format = "NCDHW"
return ivy.max_pool3d(
input,
kernel_size,
stride,
padding,
data_format=data_format,
dilation=dilation,
ceil_mode=ceil_mode,
)
@with_supported_dtypes(
{
"2.0 and below": (
"int8",
"int16",
"int32",
"int64",
"float16",
"float32",
"float64",
)
},
"mindspore",
)
@to_ivy_arrays_and_back
def pad(input, pad_width, mode="constant", constant_values=0):
return ivy.pad(input, pad_width, mode=mode, constant_values=constant_values)
@with_supported_dtypes({"2.0.0 and below": ("float16", "float32")}, "mindspore")
@to_ivy_arrays_and_back
def selu(input_x):
return ivy.selu(input_x)
@with_supported_dtypes({"2.0.0 and below": ("float32", "float64")}, "mindspore")
@to_ivy_arrays_and_back
def softshrink(x, lambd=0.5):
    low = ivy.where(ivy.less(x, -lambd), ivy.add(x, lambd), 0)
    up = ivy.where(ivy.greater(x, lambd), ivy.subtract(x, lambd), 0)
return ivy.add(low, up)
@with_supported_dtypes({"2.0 and below": ("float16", "float32")}, "mindspore")
@to_ivy_arrays_and_back
def softsign(x):
return ivy.divide(x, ivy.add(1, ivy.abs(x)))
| ivy/ivy/functional/frontends/mindspore/ops/function/nn_func.py/0 | {
"file_path": "ivy/ivy/functional/frontends/mindspore/ops/function/nn_func.py",
"repo_id": "ivy",
"token_count": 5949
} | 30 |
from . import from_shape_or_value
from .from_shape_or_value import *
from . import from_existing_data
from .from_existing_data import *
from . import numerical_ranges
from .numerical_ranges import *
from . import building_matrices
from .building_matrices import *
from . import matrix_class
from .matrix_class import *
| ivy/ivy/functional/frontends/numpy/creation_routines/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/creation_routines/__init__.py",
"repo_id": "ivy",
"token_count": 94
} | 31 |
import inspect
import ivy
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
)
@to_ivy_arrays_and_back
def diag_indices(n, ndim=2):
idx = ivy.arange(n)
res = ivy.array((idx,) * ndim)
res = tuple(res.astype("int64"))
return res
@to_ivy_arrays_and_back
def indices(dimensions, dtype=int, sparse=False):
return ivy.indices(dimensions, dtype=dtype, sparse=sparse)
@to_ivy_arrays_and_back
def mask_indices(n, mask_func, k=0):
mask_func_obj = inspect.unwrap(mask_func)
mask_func_name = mask_func_obj.__name__
try:
ivy_mask_func_obj = getattr(ivy.functional.frontends.numpy, mask_func_name)
a = ivy.ones((n, n))
mask = ivy_mask_func_obj(a, k=k)
indices = ivy.argwhere(mask.ivy_array)
ret = indices[:, 0], indices[:, 1]
return tuple(ret)
    except AttributeError as e:
        raise ivy.utils.exceptions.IvyException(
            f"mask function {mask_func_name!r} is not supported: {e}"
        ) from e
@to_ivy_arrays_and_back
def tril_indices(n, k=0, m=None):
return ivy.tril_indices(n, m, k)
@to_ivy_arrays_and_back
def tril_indices_from(arr, k=0):
return ivy.tril_indices(arr.shape[0], arr.shape[1], k)
# unravel_index
@to_ivy_arrays_and_back
def unravel_index(indices, shape, order="C"):
ret = [x.astype("int64") for x in ivy.unravel_index(indices, shape)]
return tuple(ret)
| ivy/ivy/functional/frontends/numpy/indexing_routines/generating_index_arrays.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/indexing_routines/generating_index_arrays.py",
"repo_id": "ivy",
"token_count": 610
} | 32 |
# global
import ivy
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
handle_numpy_casting,
handle_numpy_dtype,
from_zero_dim_arrays_to_scalar,
handle_numpy_out,
)
# --- Helpers --- #
# --------------- #
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _logical_and(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.logical_and(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _logical_not(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.logical_not(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _logical_or(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.logical_or(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _logical_xor(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.logical_xor(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
| ivy/ivy/functional/frontends/numpy/logic/logical_operations.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/logic/logical_operations.py",
"repo_id": "ivy",
"token_count": 1007
} | 33 |
# local
import ivy
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
)
@to_ivy_arrays_and_back
def pad(array, pad_width, mode="constant", **kwargs):
return ivy.pad(array, pad_width, mode=mode, **kwargs)
| ivy/ivy/functional/frontends/numpy/manipulation_routines/padding_arrays.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/manipulation_routines/padding_arrays.py",
"repo_id": "ivy",
"token_count": 100
} | 34 |
# global
import ivy
# local
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
handle_numpy_dtype,
from_zero_dim_arrays_to_scalar,
handle_numpy_out,
)
import ivy.functional.frontends.numpy as np_frontend
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
def cumprod(a, /, axis=None, dtype=None, out=None):
return ivy.cumprod(a, axis=axis, dtype=dtype, out=out)
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
def cumsum(a, /, axis=None, dtype=None, out=None):
return ivy.cumsum(a, axis=axis, dtype=dtype, out=out)
@to_ivy_arrays_and_back
def diff(x, /, *, n=1, axis=-1, prepend=None, append=None):
return ivy.diff(x, n=n, axis=axis, prepend=prepend, append=append)
@to_ivy_arrays_and_back
def ediff1d(ary, to_end=None, to_begin=None):
diffs = ivy.diff(ary)
if to_begin is not None:
if not isinstance(to_begin, (list, tuple)):
to_begin = [to_begin]
to_begin = ivy.array(to_begin)
diffs = ivy.concat((to_begin, diffs))
if to_end is not None:
if not isinstance(to_end, (list, tuple)):
to_end = [to_end]
to_end = ivy.array(to_end)
diffs = ivy.concat((diffs, to_end))
return diffs
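# Illustrative usage sketch (not part of the original module); return type
# follows the frontend wrapper, values shown as plain lists:
#   ediff1d([1, 2, 4, 7])                                -> [1, 2, 3]
#   ediff1d([1, 2, 4, 7], to_begin=-99, to_end=[88, 99]) -> [-99, 1, 2, 3, 88, 99]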
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
def nancumprod(a, /, axis=None, dtype=None, out=None):
a = ivy.where(ivy.isnan(a), ivy.ones_like(a), a)
return ivy.cumprod(a, axis=axis, dtype=dtype, out=out)
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
def nancumsum(a, /, axis=None, dtype=None, out=None):
a = ivy.where(ivy.isnan(a), ivy.zeros_like(a), a)
return ivy.cumsum(a, axis=axis, dtype=dtype, out=out)
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def nanprod(
a, /, *, axis=None, dtype=None, out=None, keepdims=False, initial=None, where=None
):
fill_values = ivy.ones_like(a)
a = ivy.where(ivy.isnan(a), fill_values, a)
if ivy.is_array(where):
a = ivy.where(where, a, ivy.default(out, fill_values), out=out)
if initial is not None:
        s = ivy.to_list(ivy.shape(a, as_array=True))
        s[axis] = 1
        header = ivy.full(ivy.Shape(tuple(s)), initial)
a = ivy.concat([header, a], axis=axis)
return ivy.prod(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def nansum(
a, /, *, axis=None, dtype=None, out=None, keepdims=False, initial=None, where=None
):
fill_values = ivy.zeros_like(a)
a = ivy.where(ivy.isnan(a), fill_values, a)
if ivy.is_array(where):
a = ivy.where(where, a, ivy.default(out, fill_values), out=out)
if initial is not None:
        s = ivy.to_list(ivy.shape(a, as_array=True))
        s[axis] = 1
        header = ivy.full(ivy.Shape(tuple(s)), initial)
a = ivy.concat([header, a], axis=axis)
return ivy.sum(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def prod(
x,
/,
*,
axis=None,
dtype=None,
out=None,
keepdims=False,
initial=None,
where=True,
):
if where is not True:
x = ivy.where(where, x, ivy.default(out, ivy.ones_like(x)), out=out)
if initial is not None:
initial = np_frontend.array(initial, dtype=dtype).tolist()
if axis is not None:
s = ivy.to_list(ivy.shape(x, as_array=True))
s[axis] = 1
header = ivy.full(ivy.Shape(tuple(s)), initial)
x = ivy.concat([header, x], axis=axis)
else:
x[0] *= initial
return ivy.prod(x, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def sum(
x,
/,
*,
axis=None,
dtype=None,
keepdims=False,
out=None,
initial=None,
where=True,
):
if ivy.is_array(where):
x = ivy.where(where, x, ivy.default(out, ivy.zeros_like(x)), out=out)
    if initial is not None:
        s = ivy.to_list(ivy.shape(x, as_array=True))
        s[axis] = 1
        header = ivy.full(ivy.Shape(tuple(s)), initial)
        x = ivy.concat([header, x], axis=axis)
return ivy.sum(x, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
@to_ivy_arrays_and_back
def trapz(y, x=None, dx=1.0, axis=-1):
return ivy.trapz(y, x=x, dx=dx, axis=axis)
| ivy/ivy/functional/frontends/numpy/mathematical_functions/sums_products_differences.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/mathematical_functions/sums_products_differences.py",
"repo_id": "ivy",
"token_count": 2315
} | 35 |
# local
import ivy
from ivy.functional.frontends.numpy import promote_types_of_numpy_inputs
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
from_zero_dim_arrays_to_scalar,
handle_numpy_out,
)
# --- Helpers --- #
# --------------- #
# nanargmin and nanargmax composition helper
def _nanargminmax(a, axis=None, value=ivy.inf):
    # check nans
    nans = ivy.isnan(a).astype(ivy.bool)
    # replace nans with +/-inf so they are never selected
    a = ivy.where(nans, value, a)
    nans = ivy.all(nans, axis=axis)
    if ivy.any(nans):
        raise ivy.utils.exceptions.IvyError("All-NaN slice encountered")
return a
# --- Main --- #
# ------------ #
@handle_numpy_out
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def argmax(
a,
/,
*,
axis=None,
out=None,
keepdims=False,
):
return ivy.argmax(a, axis=axis, out=out, keepdims=keepdims)
@handle_numpy_out
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def argmin(a, /, *, axis=None, keepdims=False, out=None):
return ivy.argmin(a, axis=axis, out=out, keepdims=keepdims)
@to_ivy_arrays_and_back
def argwhere(a):
return ivy.argwhere(a)
@to_ivy_arrays_and_back
def extract(cond, arr, /):
if cond.dtype == "bool":
return arr[cond]
else:
return arr[cond != 0]
@to_ivy_arrays_and_back
def flatnonzero(a):
    return ivy.nonzero(ivy.reshape(a, (-1,)))[0]
@handle_numpy_out
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def nanargmax(a, /, *, axis=None, out=None, keepdims=False):
    a = _nanargminmax(a, axis=axis, value=-ivy.inf)
return ivy.argmax(a, axis=axis, keepdims=keepdims, out=out)
@handle_numpy_out
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def nanargmin(a, /, *, axis=None, out=None, keepdims=False):
    a = _nanargminmax(a, axis=axis, value=ivy.inf)
return ivy.argmin(a, axis=axis, keepdims=keepdims, out=out)
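# Illustrative usage sketch (not part of the original module): NaNs are
# masked with -inf/+inf by `_nanargminmax` so they are never selected:
#   a = [[nan, 4.], [2., 3.]]    (flattened: [nan, 4., 2., 3.])
#   nanargmax(a) -> 1            (the 4.)
#   nanargmin(a) -> 2            (the 2.)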
@to_ivy_arrays_and_back
def nonzero(a):
return ivy.nonzero(a)
@to_ivy_arrays_and_back
def searchsorted(a, v, side="left", sorter=None):
return ivy.searchsorted(a, v, side=side, sorter=sorter)
@to_ivy_arrays_and_back
def where(cond, x1=None, x2=None, /):
if x1 is None and x2 is None:
# numpy where behaves as np.asarray(condition).nonzero() when x and y
# not included
return ivy.asarray(cond).nonzero()
elif x1 is not None and x2 is not None:
x1, x2 = promote_types_of_numpy_inputs(x1, x2)
return ivy.where(cond, x1, x2)
else:
raise ivy.utils.exceptions.IvyException("where takes either 1 or 3 arguments")
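# Illustrative usage sketch (not part of the original module):
#   where(cond)                          -> cond.nonzero()  (single-argument form)
#   where([True, False], [1, 2], [9, 9]) -> [1, 9]          (elementwise select)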
| ivy/ivy/functional/frontends/numpy/sorting_searching_counting/searching.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/sorting_searching_counting/searching.py",
"repo_id": "ivy",
"token_count": 1190
} | 36 |
import sys
import ivy
from ivy.utils.exceptions import handle_exceptions
from ivy.functional.frontends import set_frontend_to_specific_version
# global
from numbers import Number
from typing import Union, Tuple, Iterable
# Constructing dtypes is required, as ivy.<dtype>
# changes dynamically with the backend and may not be available
int8 = ivy.IntDtype("int8")
int16 = ivy.IntDtype("int16")
int32 = ivy.IntDtype("int32")
int64 = ivy.IntDtype("int64")
uint8 = ivy.UintDtype("uint8")
float16 = ivy.FloatDtype("float16")
float32 = ivy.FloatDtype("float32")
float64 = ivy.FloatDtype("float64")
complex64 = ivy.ComplexDtype("complex64")
complex128 = ivy.ComplexDtype("complex128")
bool = ivy.Dtype("bool")
# data type promotion
paddle_promotion_table = {
(uint8, uint8): uint8,
(uint8, int8): int16,
(uint8, int16): int16,
(uint8, int32): int32,
(uint8, int64): int64,
(uint8, float16): float16,
(uint8, float32): float32,
(uint8, float64): float64,
(uint8, bool): uint8,
(uint8, complex64): complex64,
(uint8, complex128): complex128,
(int8, uint8): int16,
(int8, int8): int8,
(int8, int16): int16,
(int8, int32): int32,
(int8, int64): int64,
(int8, float16): float16,
(int8, float32): float32,
(int8, float64): float64,
(int8, bool): int8,
(int8, complex64): complex64,
(int8, complex128): complex128,
(int16, uint8): int16,
(int16, int8): int16,
(int16, int16): int16,
(int16, int32): int32,
(int16, int64): int64,
(int16, float16): float16,
(int16, float32): float32,
(int16, float64): float64,
(int16, bool): int16,
(int16, complex64): complex64,
(int16, complex128): complex128,
(int32, uint8): int32,
(int32, int8): int32,
(int32, int16): int32,
(int32, int32): int32,
(int32, int64): int64,
(int32, float16): float16,
(int32, float32): float32,
(int32, float64): float64,
(int32, bool): int32,
(int32, complex64): complex64,
(int32, complex128): complex128,
(int64, uint8): int64,
(int64, int8): int64,
(int64, int16): int64,
(int64, int32): int64,
(int64, int64): int64,
(int64, float16): float16,
(int64, float32): float32,
(int64, float64): float64,
(int64, bool): int64,
(int64, complex64): complex64,
(int64, complex128): complex128,
(float16, uint8): float16,
(float16, int8): float16,
(float16, int16): float16,
(float16, int32): float16,
(float16, int64): float16,
(float16, float16): float16,
(float16, float32): float32,
(float16, float64): float64,
(float16, bool): float16,
(float16, complex64): complex64,
(float16, complex128): complex128,
(float32, uint8): float32,
(float32, int8): float32,
(float32, int16): float32,
(float32, int32): float32,
(float32, int64): float32,
(float32, float16): float32,
(float32, float32): float32,
(float32, float64): float64,
(float32, bool): float32,
(float32, complex64): complex64,
(float32, complex128): complex128,
(float64, uint8): float64,
(float64, int8): float64,
(float64, int16): float64,
(float64, int32): float64,
(float64, int64): float64,
(float64, float16): float64,
(float64, float32): float64,
(float64, float64): float64,
(float64, bool): float64,
(float64, complex64): complex128,
(float64, complex128): complex128,
(bool, uint8): uint8,
(bool, int8): int8,
(bool, int16): int16,
(bool, int32): int32,
(bool, int64): int64,
(bool, float16): float16,
(bool, float32): float32,
(bool, float64): float64,
(bool, bool): bool,
(bool, complex64): complex64,
(bool, complex128): complex128,
(complex64, uint8): complex64,
(complex64, int8): complex64,
(complex64, int16): complex64,
(complex64, int32): complex64,
(complex64, int64): complex64,
(complex64, float16): complex64,
(complex64, float32): complex64,
(complex64, float64): complex128,
(complex64, bool): complex64,
(complex64, complex64): complex64,
(complex64, complex128): complex128,
(complex128, uint8): complex128,
(complex128, int8): complex128,
(complex128, int16): complex128,
(complex128, int32): complex128,
(complex128, int64): complex128,
(complex128, float16): complex128,
(complex128, float32): complex128,
(complex128, float64): complex128,
(complex128, bool): complex128,
(complex128, complex64): complex128,
(complex128, complex128): complex128,
}
@handle_exceptions
def promote_types_paddle(
type1: Union[ivy.Dtype, ivy.NativeDtype],
type2: Union[ivy.Dtype, ivy.NativeDtype],
/,
) -> ivy.Dtype:
"""Promote the datatypes type1 and type2, returning the data type they
promote to.
Parameters
----------
type1
the first of the two types to promote
type2
the second of the two types to promote
Returns
-------
ret
The type that both input types promote to
"""
try:
ret = paddle_promotion_table[(ivy.as_ivy_dtype(type1), ivy.as_ivy_dtype(type2))]
except KeyError as e:
raise ivy.utils.exceptions.IvyException(
"these dtypes are not type promotable"
) from e
return ret
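# Illustrative usage sketch (reads straight off the table above; not part of
# the original module):
#   promote_types_paddle(int8, float32)      -> float32
#   promote_types_paddle(uint8, int8)        -> int16
#   promote_types_paddle(float64, complex64) -> complex128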
@handle_exceptions
def promote_types_of_paddle_inputs(
x1: Union[ivy.Array, Number, Iterable[Number]],
x2: Union[ivy.Array, Number, Iterable[Number]],
/,
) -> Tuple[ivy.Array, ivy.Array]:
"""Promote the dtype of the given native array inputs to a common dtype
based on type promotion rules.
While passing float or integer values or any other non-array input
to this function, it should be noted that the return will be an
array-like object. Therefore, outputs from this function should be
    used as inputs only for those functions that expect array-like or
tensor-like objects, otherwise it might give unexpected results.
"""
type1 = ivy.default_dtype(item=x1).strip("u123456789")
type2 = ivy.default_dtype(item=x2).strip("u123456789")
if hasattr(x1, "dtype") and not hasattr(x2, "dtype") and type1 == type2:
x1 = ivy.asarray(x1)
x2 = ivy.asarray(
x2, dtype=x1.dtype, device=ivy.default_device(item=x1, as_native=False)
)
elif not hasattr(x1, "dtype") and hasattr(x2, "dtype") and type1 == type2:
x1 = ivy.asarray(
x1, dtype=x2.dtype, device=ivy.default_device(item=x2, as_native=False)
)
x2 = ivy.asarray(x2)
else:
x1 = ivy.asarray(x1)
x2 = ivy.asarray(x2)
promoted = promote_types_paddle(x1.dtype, x2.dtype)
x1 = ivy.asarray(x1, dtype=promoted)
x2 = ivy.asarray(x2, dtype=promoted)
return x1, x2
from . import nn
from . import tensor
from .tensor.tensor import Tensor
from . import vision
from .attribute import *
from .creation import *
from .fft import *
from .linalg import *
from .logic import *
from .manipulation import *
from .math import *
from .random import *
from .search import *
from .stat import *
_frontend_array = Tensor
# setting to specific version #
# --------------------------- #
if ivy.is_local():
module = ivy.utils._importlib.import_cache[__name__]
else:
module = sys.modules[__name__]
__version__ = set_frontend_to_specific_version(module)
| ivy/ivy/functional/frontends/paddle/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/__init__.py",
"repo_id": "ivy",
"token_count": 3090
} | 37 |
# global
from ..search import * # noqa: F401
| ivy/ivy/functional/frontends/paddle/tensor/search.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/tensor/search.py",
"repo_id": "ivy",
"token_count": 16
} | 38 |
# global
import ivy
# SI prefixes #
# ------#
quetta = ivy.quetta
ronna = ivy.ronna
yotta = ivy.yotta
zetta = ivy.zetta
exa = ivy.exa
peta = ivy.peta
tera = ivy.tera
giga = ivy.giga
mega = ivy.mega
kilo = ivy.kilo
hecto = ivy.hecto
deka = ivy.deka
deci = ivy.deci
centi = ivy.centi
milli = ivy.milli
micro = ivy.micro
nano = ivy.nano
pico = ivy.pico
femto = ivy.femto
atto = ivy.atto
zepto = ivy.zepto
yocto = ivy.yocto
ronto = ivy.ronto
quecto = ivy.quecto
# Binary prefixes #
# ------#
kibi = ivy.kibi
mebi = ivy.mebi
gibi = ivy.gibi
tebi = ivy.tebi
pebi = ivy.pebi
exbi = ivy.exbi
zebi = ivy.zebi
yobi = ivy.yobi
# Mathematical constants #
# ------#
pi = ivy.pi
golden = ivy.golden
golden_ratio = ivy.golden_ratio
| ivy/ivy/functional/frontends/scipy/constants/constants.py/0 | {
"file_path": "ivy/ivy/functional/frontends/scipy/constants/constants.py",
"repo_id": "ivy",
"token_count": 362
} | 39 |
from .stats import *
from . import contingency
from . import distributions
from . import mstats
from . import qmc
from . import sampling
| ivy/ivy/functional/frontends/scipy/stats/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/scipy/stats/__init__.py",
"repo_id": "ivy",
"token_count": 33
} | 40 |
from ivy.functional.frontends.sklearn.base import BaseEstimator, TransformerMixin
import ivy
class LabelEncoder(TransformerMixin, BaseEstimator):
def fit(self, y):
shape = y.shape
if len(shape) == 2 and shape[1] == 1:
y = y.reshape(-1)
elif len(shape) != 1:
raise ValueError("y should be a 1d array, or column")
self.classes_ = ivy.unique_values(y)
return self
def fit_transform(self, y):
raise NotImplementedError
def transform(self, y):
raise NotImplementedError
def inverse_transform(self, y):
raise NotImplementedError
| ivy/ivy/functional/frontends/sklearn/preprocessing/_label.py/0 | {
"file_path": "ivy/ivy/functional/frontends/sklearn/preprocessing/_label.py",
"repo_id": "ivy",
"token_count": 268
} | 41 |
# global
import inspect
from typing import Callable, Dict, Optional
import functools
# local
import ivy
import ivy.functional.frontends.tensorflow as frontend
import ivy.functional.frontends.numpy as np_frontend
# --- Helpers --- #
# --------------- #
def _ivy_array_to_tensorflow(x):
if isinstance(x, ivy.Array) or ivy.is_native_array(x):
return frontend.EagerTensor(x)
return x
def _native_to_ivy_array(x):
if isinstance(x, ivy.NativeArray):
return ivy.array(x)
return x
def _tf_frontend_array_to_ivy(x):
if hasattr(x, "ivy_array"):
return x.ivy_array
return x
def _to_ivy_array(x):
return _tf_frontend_array_to_ivy(_native_to_ivy_array(x))
# update kwargs dictionary keys helper
def _update_kwarg_keys(kwargs: Dict, to_update: Dict) -> Dict:
"""Update the key-word only arguments dictionary.
Parameters
----------
kwargs
A dictionary containing key-word only arguments to be updated.
to_update
        The dictionary of keys to update; it maps each raw_ops argument
        name to the corresponding tf_frontend argument name.
Returns
-------
ret
An updated dictionary with new keyword mapping
"""
new_kwargs = {}
for key, value in kwargs.items():
        if key in to_update:
new_kwargs.update({to_update[key]: value})
else:
new_kwargs.update({key: value})
return new_kwargs
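# Illustrative usage sketch (not part of the original module): renaming a
# raw_ops-style argument to its frontend counterpart:
#   _update_kwarg_keys({"x": 1, "name": None}, {"x": "input"})
#   -> {"input": 1, "name": None}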
# --- Main --- #
# ------------ #
def handle_tf_dtype(fn: Callable) -> Callable:
@functools.wraps(fn)
def _handle_tf_dtype(*args, dtype=None, **kwargs):
if len(args) > (dtype_pos + 1):
dtype = args[dtype_pos]
kwargs = {
**dict(
zip(
list(inspect.signature(fn).parameters.keys())[
dtype_pos + 1 : len(args)
],
args[dtype_pos + 1 :],
)
),
**kwargs,
}
args = args[:dtype_pos]
elif len(args) == (dtype_pos + 1):
dtype = args[dtype_pos]
args = args[:-1]
if dtype is not None:
dtype = to_ivy_dtype(dtype)
return fn(*args, dtype=dtype, **kwargs)
return fn(*args, **kwargs)
dtype_pos = list(inspect.signature(fn).parameters).index("dtype")
_handle_tf_dtype.handle_tf_dtype = True
return _handle_tf_dtype
def inputs_to_ivy_arrays(fn: Callable) -> Callable:
@functools.wraps(fn)
def _inputs_to_ivy_arrays_tf(*args, **kwargs):
"""Convert all `TensorFlow.Tensor` instances in both the positional and
keyword arguments into `ivy.Array` instances, and then call the
function with the updated arguments.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with ivy arrays passed in the arguments.
"""
has_out = False
out = None
if "out" in kwargs:
out = kwargs["out"]
del kwargs["out"]
has_out = True
# convert all arrays in the inputs to ivy.Array instances
ivy_args = ivy.nested_map(
_to_ivy_array, args, include_derived=True, shallow=False
)
ivy_kwargs = ivy.nested_map(
_to_ivy_array, kwargs, include_derived=True, shallow=False
)
if has_out:
ivy_kwargs["out"] = out
return fn(*ivy_args, **ivy_kwargs)
_inputs_to_ivy_arrays_tf.inputs_to_ivy_arrays_tf = True
return _inputs_to_ivy_arrays_tf
def map_raw_ops_alias(
alias: callable, kwargs_to_update: Optional[Dict] = None
) -> callable:
"""Map the raw_ops function with its respective frontend alias function, as
the implementations of raw_ops is way similar to that of frontend
functions, except that only arguments are passed as key-word only in
raw_ops functions.
Parameters
----------
alias:
The frontend function that is being referenced to as an alias to the
current raw_ops function.
kwargs_to_update:
A dictionary containing key-word args to update to conform with a given
raw_ops function
Returns
-------
ret
A decorated tf_frontend function to alias a given raw_ops function.
        It accepts keyword-only arguments only.
"""
def _wrap_raw_ops_alias(fn: callable, kw_update: Dict) -> callable:
# removing decorators from frontend function
fn = inspect.unwrap(fn)
# changing all the params to keyword-only
sig = inspect.signature(fn)
new_params = []
kw_update_rev = (
{value: key for key, value in kw_update.items()} if kw_update else {}
)
for param in sig.parameters.values():
# updating the name of the parameter
name = (
kw_update_rev[param.name]
            if kw_update and param.name in kw_update_rev
else param.name
)
new_params.append(param.replace(name=name, kind=param.KEYWORD_ONLY))
new_signature = sig.replace(parameters=new_params)
        def _wrapped_fn(**kwargs):
# update kwargs dictionary keys
if kw_update:
kwargs = _update_kwarg_keys(kwargs, kw_update)
return fn(**kwargs)
        _wrapped_fn.__signature__ = new_signature
        return _wrapped_fn
_wrap_raw_ops_alias.wrap_raw_ops_alias = True
return _wrap_raw_ops_alias(alias, kwargs_to_update)
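# Illustrative usage sketch (not part of the original module; the alias
# target `frontend.math.subtract` is an assumption for the example):
#   Sub = to_ivy_arrays_and_back(map_raw_ops_alias(frontend.math.subtract))
#   Sub(x=a, y=b)   # arguments become keyword-only, as with tf.raw_ops.Sub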
def outputs_to_frontend_arrays(fn: Callable) -> Callable:
@functools.wraps(fn)
def _outputs_to_frontend_arrays_tf(*args, **kwargs):
"""Call the function, and then convert all `tensorflow.Tensor`
instances in the function return into `ivy.Array` instances.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
        The return of the function, with ivy arrays converted to
        `tensorflow.Tensor` instances.
"""
# call unmodified function
ret = fn(*args, **kwargs)
# convert all arrays in the return to `frontend.Tensorflow.tensor` instances
return ivy.nested_map(
_ivy_array_to_tensorflow, ret, include_derived={"tuple": True}
)
_outputs_to_frontend_arrays_tf.outputs_to_frontend_arrays_tf = True
return _outputs_to_frontend_arrays_tf
def to_ivy_arrays_and_back(fn: Callable) -> Callable:
return outputs_to_frontend_arrays(inputs_to_ivy_arrays(fn))
def to_ivy_dtype(dtype):
if not dtype or isinstance(dtype, str):
return dtype
if isinstance(dtype, np_frontend.dtype):
return dtype.ivy_dtype
return frontend.as_dtype(dtype).ivy_dtype
| ivy/ivy/functional/frontends/tensorflow/func_wrapper.py/0 | {
"file_path": "ivy/ivy/functional/frontends/tensorflow/func_wrapper.py",
"repo_id": "ivy",
"token_count": 3218
} | 42 |
from . import ragged
| ivy/ivy/functional/frontends/tensorflow/ragged/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/tensorflow/ragged/__init__.py",
"repo_id": "ivy",
"token_count": 6
} | 43 |
# global
import functools
from typing import Callable
# local
import ivy
import ivy.functional.frontends.torch as torch_frontend
numpy_compatible_args = {
"axis": "dim",
"keepdims": "keepdim",
"x": "input",
"a": "input",
"x1": "input",
"x2": "other",
}
class AccumulateGrad:
def __init__(self) -> None:
self.next_functions = ()
self.__name__ = "AccumulateGrad"
def __repr__(self):
return self.__name__
def __eq__(self, __value: object) -> bool:
return self.__name__ == __value
def __call__(self, grads):
self.__self__._grads = grads
return None
class GradFn:
def __init__(self, fn, args, kwargs) -> None:
self._inputs = []
self._fns = []
self.next_functions = []
for idx, input in [*enumerate(args), *kwargs.items()]:
if isinstance(input, torch_frontend.Tensor) and input.requires_grad:
self._inputs.append(input.detach())
def wrap_fn(idx):
def d_fn(x):
if idx in kwargs:
return fn(
*args,
**{
key: value
for key, value in kwargs.items()
if key != idx
},
                                **{idx: x},
)
return fn(*args[:idx], x, *args[idx + 1 :], **kwargs)
return d_fn
self._fns.append(to_ivy_arrays_and_back(ivy.jac(wrap_fn(idx))))
if input.grad_fn is not None:
self.next_functions.append(input.grad_fn)
elif input.is_leaf:
acc_grad = AccumulateGrad()
acc_grad.__self__ = input
self.next_functions.append(acc_grad)
self.__name__ = fn.__name__.capitalize() + "Backward"
def __call__(self, prev_grads):
result = []
for input_tensor, jac_fn in zip(self._inputs, self._fns):
jacobian = jac_fn(input_tensor)
dims = list(range(jacobian.dim()))
permuted_dims = dims[input_tensor.dim() :] + dims[: input_tensor.dim()]
result.append(
(
jacobian.permute(dims=permuted_dims).reshape(
shape=(*input_tensor.shape, -1)
)
* prev_grads.ravel()
).sum(-1)
)
return result
def __repr__(self):
return self.__name__
def __eq__(self, __value: object) -> bool:
return self.__name__ == __value
# --- Helpers --- #
# --------------- #
def _from_ivy_array_to_torch_frontend_tensor(
x, nested=False, include_derived=None, requires_grad=False
):
if nested:
return ivy.nested_map(
functools.partial(
_from_ivy_array_to_torch_frontend_tensor, requires_grad=requires_grad
),
x,
include_derived,
shallow=False,
)
elif isinstance(x, ivy.Array) or ivy.is_native_array(x):
a = torch_frontend.Tensor(x, _init_overload=True, requires_grad=requires_grad)
return a
return x
def _to_ivy_array(x):
# if x is a native array return it as an ivy array
if isinstance(x, ivy.NativeArray):
return ivy.array(x)
# else if x is a frontend torch Tensor (or any frontend "Tensor" actually) return the wrapped ivy array # noqa: E501
elif hasattr(x, "ivy_array"):
return x.ivy_array
# else just return x
return x
# --- Main --- #
# ------------ #
def inputs_to_ivy_arrays(fn: Callable) -> Callable:
@functools.wraps(fn)
def _inputs_to_ivy_arrays_torch(*args, **kwargs):
"""Convert `Tensor` into `ivy.Array` instances.
Convert all `Tensor` instances in both the positional and keyword arguments
into `ivy.Array` instances, and then call the function with the updated
arguments.
"""
# convert all input arrays to ivy.Array instances
new_args = ivy.nested_map(
_to_ivy_array, args, include_derived={"tuple": True}, shallow=False
)
new_kwargs = ivy.nested_map(
_to_ivy_array, kwargs, include_derived={"tuple": True}, shallow=False
)
return fn(*new_args, **new_kwargs)
_inputs_to_ivy_arrays_torch.inputs_to_ivy_arrays_torch = True
return _inputs_to_ivy_arrays_torch
def numpy_to_torch_style_args(func): # noqa
"""Convert argument names from NumPy style to PyTorch style."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
new_kwargs = {
numpy_compatible_args.get(key, key): value for key, value in kwargs.items()
}
return func(*args, **new_kwargs)
wrapper.numpy_to_torch_style_args = True
return wrapper
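# Illustrative usage sketch (not part of the original module): NumPy-style
# keyword names are translated via `numpy_compatible_args` before the call:
#   @numpy_to_torch_style_args
#   def amax(input, dim=None, keepdim=False): ...
#   amax(t, axis=0, keepdims=True)   # forwarded as amax(t, dim=0, keepdim=True)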
def outputs_to_frontend_arrays(fn: Callable) -> Callable:
@functools.wraps(fn)
def outputs_to_frontend_arrays_torch(*args, **kwargs):
"""Convert `ivy.Array` into `Tensor` instances.
Call the function, and then convert all `ivy.Array` instances returned by the
function into `Tensor` instances.
"""
# call unmodified function
# ToDo: Remove this default dtype setting
# once frontend specific backend setting is added
set_default_dtype = False
if not ("dtype" in kwargs and ivy.exists(kwargs["dtype"])) and all(
not (ivy.is_array(i) or hasattr(i, "ivy_array")) for i in args
):
if ivy.current_backend_str() == "jax":
import jax
jax.config.update("jax_enable_x64", True)
ivy.set_default_int_dtype("int64")
ivy.set_default_float_dtype(torch_frontend.get_default_dtype())
set_default_dtype = True
try:
ret = fn(*args, **kwargs)
finally:
if set_default_dtype:
ivy.unset_default_int_dtype()
ivy.unset_default_float_dtype()
# convert all arrays in the return to `torch_frontend.Tensor` instances
ret = _from_ivy_array_to_torch_frontend_tensor(
ret,
nested=True,
include_derived={"tuple": True},
requires_grad=kwargs.get(
"requires_grad",
any(
isinstance(i, torch_frontend.Tensor) and i.requires_grad
for i in args
),
),
)
def array_fn(x):
return ivy.is_array(x) or hasattr(x, "ivy_array")
if "inplace" in kwargs and kwargs["inplace"]:
first_array = ivy.func_wrapper._get_first_array(
*args, array_fn=array_fn, **kwargs
)
native_ret_data = ret.ivy_array.data
if ivy.is_ivy_array(first_array):
first_array.data = native_ret_data
elif ivy.is_native_array(first_array):
ivy.inplace_update(first_array, native_ret_data)
ret = torch_frontend.Tensor(first_array, _init_overload=True)
else:
first_array.ivy_array.data = native_ret_data
ret = first_array
# logic for setting is_leaf
if ret is not None and isinstance(ret, torch_frontend.Tensor):
if fn.__name__ in dir(torch_frontend.creation_ops):
ret.is_leaf = True
elif all(
not isinstance(i, torch_frontend.Tensor)
or (not i.requires_grad and not i.grad_fn)
for i in args
):
ret.is_leaf = True
else:
ret.is_leaf = False
# set grad_fn
if any(
isinstance(i, torch_frontend.Tensor) and i.requires_grad
for i in [*args, *kwargs.values()]
):
# ToDo: Implement for unbind
grad_fn = GradFn(fn, args, kwargs)
grad_fn.__self__ = ret
ret.grad_fn = grad_fn
return ret
outputs_to_frontend_arrays_torch.outputs_to_frontend_arrays_torch = True
return outputs_to_frontend_arrays_torch
def outputs_to_native_arrays(fn: Callable):
@functools.wraps(fn)
def outputs_to_native_arrays_torch(*args, **kwargs):
ret = fn(*args, **kwargs)
if isinstance(ret, torch_frontend.Tensor):
ret = ret.ivy_array.data
return ret
outputs_to_native_arrays_torch.outputs_to_native_arrays_torch = True
return outputs_to_native_arrays_torch
def to_ivy_arrays_and_back(fn: Callable) -> Callable:
"""Wrap `fn` so it receives and returns `ivy.Array` instances.
Wrap `fn` so that input arrays are all converted to `ivy.Array` instances and
return arrays are all converted to `Tensor` instances.
"""
return outputs_to_frontend_arrays(inputs_to_ivy_arrays(fn))
def to_ivy_shape(fn: Callable) -> Callable:
"""Wrap `fn` so it receives `ivy.Shape` instances.
Wrap `fn` so that any `torch_frontend.Size` arguments are converted to
`ivy.Shape` instances.
"""
@functools.wraps(fn)
def to_ivy_shape_torch(*args, **kwargs):
new_kwargs = {
key: (
value.ivy_shape
if key in ["shape", "size"]
and isinstance(value, ivy.functional.frontends.torch.Size)
else value
)
for key, value in kwargs.items()
}
        # if any of the args are instances of torch_frontend.Size,
# convert them to ivy.Shape.
new_args = ivy.nested_map(
lambda x: (
x.ivy_shape if isinstance(x, ivy.functional.frontends.torch.Size) else x
),
args,
shallow=False,
)
return fn(*new_args, **new_kwargs)
to_ivy_shape_torch.to_ivy_shape_torch = True
return to_ivy_shape_torch
| ivy/ivy/functional/frontends/torch/func_wrapper.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/func_wrapper.py",
"repo_id": "ivy",
"token_count": 5148
} | 44 |
import ivy
from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
from ivy.func_wrapper import with_supported_dtypes
@to_ivy_arrays_and_back
def embedding(
input,
weight,
padding_idx=None,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
sparse=False,
):
# TODO: add support for the remaining arguments
ivy.utils.assertions.check_equal(
len(weight.shape), 2, message="weight must be 2-d", as_array=False
)
input = ivy.astype(input, "int64")
if max_norm is None:
ret = ivy.embedding(weight, input)
else:
if norm_type == 2.0:
ret = ivy.embedding(weight, input, max_norm=max_norm)
else:
ret = ivy.embedding(weight, input, max_norm=None)
# perform the re-norm using ivy functions
norms = ivy.vector_norm(ret, ord=norm_type, axis=-1, keepdims=True)
norms = ivy.repeat(norms, ret.shape[-1], axis=-1)
ret = ivy.where(norms > max_norm, ret * max_norm / norms, ret)
ret = ivy.where(norms < -max_norm, ret * -max_norm / norms, ret)
return ret
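# Worked sketch of the re-norm branch above (values are illustrative, not a
# recorded doctest): rows whose p-norm exceeds ``max_norm`` are rescaled so
# that their norm equals ``max_norm``.
#
#   >>> w = ivy.array([[3.0, 4.0], [0.3, 0.4]])  # L1 norms: 7.0 and 0.7
#   >>> ids = ivy.array([0, 1])
#   >>> embedding(ids, w, max_norm=1.0, norm_type=1.0)
#   # row 0 is scaled by 1.0 / 7.0; row 1 already satisfies the bound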
@with_supported_dtypes({"2.2 and below": ("int64",)}, "torch")
@to_ivy_arrays_and_back
def one_hot(tensor, num_classes=-1):
return ivy.astype(ivy.one_hot(tensor, num_classes), tensor.dtype)
| ivy/ivy/functional/frontends/torch/nn/functional/sparse_functions.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/nn/functional/sparse_functions.py",
"repo_id": "ivy",
"token_count": 605
} | 45 |
from . import core
from .core import *
from . import gbm
from .gbm import *
from . import linear
from .linear import *
from . import objective
from .objective import *
from . import sklearn
from .sklearn import *
from . import training
from .training import *
_frontend_array = DMatrix
| ivy/ivy/functional/frontends/xgboost/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/xgboost/__init__.py",
"repo_id": "ivy",
"token_count": 82
} | 46 |
# global
import ast
import logging
import inspect
import math
import functools
from numbers import Number
from typing import Union, Tuple, List, Optional, Callable, Iterable, Any
import numpy as np
import importlib
# local
import ivy
from ivy.utils.backend import current_backend
from ivy.func_wrapper import (
handle_array_function,
handle_out_argument,
to_native_arrays_and_back,
inputs_to_native_arrays,
handle_nestable,
handle_array_like_without_promotion,
inputs_to_ivy_arrays,
inputs_to_native_shapes,
handle_device,
handle_backend_invalid,
)
from ivy.utils.exceptions import handle_exceptions
# Helpers #
# --------#
def _is_valid_dtypes_attributes(fn: Callable) -> bool:
if hasattr(fn, "supported_dtypes") and hasattr(fn, "unsupported_dtypes"):
fn_supported_dtypes = fn.supported_dtypes
fn_unsupported_dtypes = fn.unsupported_dtypes
if isinstance(fn_supported_dtypes, dict):
if isinstance(fn_unsupported_dtypes, dict):
backend_str = ivy.current_backend_str()
if (
backend_str in fn_supported_dtypes
and backend_str in fn_unsupported_dtypes
):
return False
elif isinstance(fn_unsupported_dtypes, tuple):
return False
return True
def _handle_nestable_dtype_info(fn):
@functools.wraps(fn)
def _handle_nestable_dtype_info_wrapper(type):
if isinstance(type, ivy.Container):
type = type.cont_map(lambda x, kc: fn(x))
type.__dict__["max"] = type.cont_map(lambda x, kc: x.max)
type.__dict__["min"] = type.cont_map(lambda x, kc: x.min)
return type
return fn(type)
return _handle_nestable_dtype_info_wrapper
# Unindent every line in the source such that
# class methods can be traced as normal methods
def _lstrip_lines(source: str) -> str:
# Separate all lines
source = source.split("\n")
# Check amount of indent before first character
indent = len(source[0]) - len(source[0].lstrip())
# Remove same spaces from all lines
for i in range(len(source)):
source[i] = source[i][indent:]
source = "\n".join(source)
return source
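# e.g. _lstrip_lines("    def f():\n        return 1") removes the common
# 4-space indent, yielding "def f():\n    return 1", so the source of a
# class method can be fed straight to ast.parse below.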
# Get the list of functions called within the given function
def _get_function_list(func):
tree = ast.parse(_lstrip_lines(inspect.getsource(func)))
names = {}
# Extract all the call names
for node in ast.walk(tree):
if isinstance(node, ast.Call):
nodef = node.func
if isinstance(nodef, ast.Name):
names[nodef.id] = getattr(
func,
"__self__",
getattr(
importlib.import_module(func.__module__),
func.__qualname__.split(".")[0],
None,
),
)
elif isinstance(nodef, ast.Attribute):
if (
hasattr(nodef, "value")
and hasattr(nodef.value, "id")
and nodef.value.id not in ["ivy", "self"]
and "_frontend" not in nodef.value.id
):
continue
names[ast.unparse(nodef)] = getattr(
func,
"__self__",
getattr(
importlib.import_module(func.__module__),
func.__qualname__.split(".")[0],
None,
),
)
return names
# Get the reference of the functions from string
def _get_functions_from_string(func_names, module):
ret = set()
# We only care about the functions in the ivy or the same module
for orig_func_name in func_names.keys():
func_name = orig_func_name.split(".")[-1]
if hasattr(ivy, func_name) and callable(getattr(ivy, func_name, None)):
ret.add(getattr(ivy, func_name))
elif hasattr(module, func_name) and callable(getattr(module, func_name, None)):
ret.add(getattr(module, func_name))
elif callable(getattr(func_names[orig_func_name], func_name, None)):
ret.add(getattr(func_names[orig_func_name], func_name))
return ret
# Get dtypes/device of nested functions, used for unsupported and supported dtypes
# IMPORTANT: a few caveats:
# 1. The base functions must be defined in ivy or in the same module
# 2. If the dtypes/devices are not set on the base function, they will not be detected
# 3. Nested functions cannot be parsed, since their function references cannot be obtained
# 4. Functions need to be called directly, not assigned to a variable first
def _nested_get(f, base_set, merge_fn, get_fn, wrapper=set):
visited = set()
to_visit = [f]
out = base_set
while to_visit:
fn = to_visit.pop()
if fn in visited:
continue
visited.add(fn)
# if it's in the backend, we can get the dtypes directly
# if it's in the front end, we need to recurse
# if it's einops, we need to recurse
if not getattr(fn, "__module__", None):
continue
is_frontend_fn = "frontend" in fn.__module__
is_backend_fn = "backend" in fn.__module__ and not is_frontend_fn
is_einops_fn = hasattr(fn, "__name__") and "einops" in fn.__name__
if is_backend_fn:
f_supported = get_fn(fn, False)
if hasattr(fn, "partial_mixed_handler"):
f_supported = merge_fn(
wrapper(f_supported["compositional"]),
wrapper(f_supported["primary"]),
)
logging.warning(
"This function includes the mixed partial function"
f" 'ivy.{fn.__name__}'. Please note that the returned data types"
" may not be exhaustive. Please check the dtypes of"
f" `ivy.{fn.__name__}` for more details"
)
out = merge_fn(wrapper(f_supported), out)
continue
        elif is_frontend_fn or is_einops_fn:
f_supported = wrapper(get_fn(fn, False))
out = merge_fn(f_supported, out)
# skip if it's not a function
if not (inspect.isfunction(fn) or inspect.ismethod(fn)):
continue
fl = _get_function_list(fn)
res = list(_get_functions_from_string(fl, __import__(fn.__module__)))
if is_frontend_fn:
frontends = {
"jax_frontend": "ivy.functional.frontends.jax",
"jnp_frontend": "ivy.functional.frontends.jax.numpy",
"np_frontend": "ivy.functional.frontends.numpy",
"tf_frontend": "ivy.functional.frontends.tensorflow",
"torch_frontend": "ivy.functional.frontends.torch",
"paddle_frontend": "ivy.functional.frontends.paddle",
}
for key in fl:
if "frontend" in key:
frontend_fn = fl[key]
for frontend in frontends:
if frontend in key:
key = key.replace(frontend, frontends[frontend])
if "(" in key:
key = key.split("(")[0]
frontend_module = ".".join(key.split(".")[:-1])
if (
frontend_module == ""
): # single edge case: fn='frontend_outputs_to_ivy_arrays'
continue
frontend_fl = {key: frontend_fn}
res += list(
_get_functions_from_string(
frontend_fl, importlib.import_module(frontend_module)
)
)
to_visit.extend(set(res))
return out
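# Usage sketch mirroring the callers further below: supported dtypes are
# intersected across the call graph, while unsupported dtypes are unioned:
#
#   supported = _nested_get(
#       fn, set(_get_dtypes(fn, complement=False)),
#       set.intersection, function_supported_dtypes,
#   )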
# allow passing "integer" if all integer dtypes are supported/unsupported for e.g.
def _expand_typesets(dtypes):
typesets = {
"valid": ivy.valid_dtypes,
"numeric": ivy.valid_numeric_dtypes,
"float": ivy.valid_float_dtypes,
"integer": ivy.valid_int_dtypes,
"unsigned": ivy.valid_uint_dtypes,
"complex": ivy.valid_complex_dtypes,
}
dtypes = list(dtypes)
typeset_list = []
for i, dtype in reversed(list(enumerate(dtypes))):
if dtype in typesets:
typeset_list.extend(typesets[dtype])
dtypes.pop(i)
dtypes += typeset_list
return dtypes
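# e.g. _expand_typesets(("float16", "integer")) replaces the "integer"
# typeset with every valid int dtype, yielding something like
# ["float16", "int8", "int16", "int32", "int64", "uint8", ...]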
# Get the list of dtypes supported or unsupported by the function;
# with complement=True (the default) the unsupported dtypes are returned
def _get_dtypes(fn, complement=True):
supported = set(ivy.valid_dtypes)
# We only care about getting dtype info from the base function
# if we do need to at some point use dtype information from the parent function
# we can comment out the following condition
is_frontend_fn = "frontend" in fn.__module__
is_backend_fn = "backend" in fn.__module__ and not is_frontend_fn
has_unsupported_dtypes_attr = hasattr(fn, "unsupported_dtypes")
if not is_backend_fn and not is_frontend_fn and not has_unsupported_dtypes_attr:
if complement:
supported = set(ivy.all_dtypes).difference(supported)
return supported
    # The attribute values are formatted as either
    # 1. a tuple, e.g. fn.supported_dtypes = ("float16",)
    # 2. a dict keyed by backend name (only einops functions use this form)
basic = [
("supported_dtypes", set.intersection, ivy.valid_dtypes),
("unsupported_dtypes", set.difference, ivy.invalid_dtypes),
]
for key, merge_fn, base in basic:
if hasattr(fn, key):
dtypes = getattr(fn, key)
# only einops allowed to be a dictionary
if isinstance(dtypes, dict):
dtypes = dtypes.get(ivy.current_backend_str(), base)
ivy.utils.assertions.check_isinstance(dtypes, tuple)
if not dtypes:
dtypes = base
dtypes = _expand_typesets(dtypes)
supported = merge_fn(supported, set(dtypes))
if complement:
supported = set(ivy.all_dtypes).difference(supported)
return tuple(supported)
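# e.g. for a backend function carrying
#     fn.unsupported_dtypes = ("float16",)
# _get_dtypes(fn, complement=False) yields every valid dtype except
# "float16", while the default complement=True yields the complement of
# that set with respect to ivy.all_dtypes.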
# Array API Standard #
# -------------------#
Finfo = None
Iinfo = None
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def astype(
x: Union[ivy.Array, ivy.NativeArray],
dtype: Union[ivy.Dtype, ivy.NativeDtype],
/,
*,
copy: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Copy an array to a specified data type irrespective of :ref:`type-
promotion` rules.
.. note::
Casting floating-point ``NaN`` and ``infinity`` values to integral data types
is not specified and is implementation-dependent.
.. note::
When casting a boolean input array to a numeric data type, a value of ``True``
must cast to a numeric value equal to ``1``, and a value of ``False`` must cast
to a numeric value equal to ``0``.
When casting a numeric input array to ``bool``, a value of ``0`` must cast to
``False``, and a non-zero value must cast to ``True``.
Parameters
----------
x
array to cast.
dtype
desired data type.
copy
specifies whether to copy an array when the specified ``dtype`` matches
the data type of the input array ``x``. If ``True``, a newly allocated
array must always be returned. If ``False`` and the specified ``dtype``
matches the data type of the input array, the input array must be returned;
        otherwise, a newly allocated array must be returned. Default: ``True``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
an array having the specified data type. The returned array must have
the same shape as ``x``.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 2])
>>> y = ivy.zeros_like(x)
>>> y = ivy.astype(x, ivy.float64)
>>> print(y)
ivy.array([1., 2.])
>>> x = ivy.array([3.141, 2.718, 1.618])
>>> y = ivy.zeros_like(x)
>>> ivy.astype(x, ivy.int32, out=y)
>>> print(y)
ivy.array([3., 2., 1.])
>>> x = ivy.array([[-1, -2], [0, 2]])
>>> ivy.astype(x, ivy.float64, out=x)
>>> print(x)
ivy.array([[-1., -2.], [0., 2.]])
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([3.141, 2.718, 1.618])
>>> y = ivy.astype(x, ivy.int32)
>>> print(y)
ivy.array([3, 2, 1])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0,2,1]),b=ivy.array([1,0,0]))
>>> print(ivy.astype(x, ivy.bool))
{
a: ivy.array([False, True, True]),
b: ivy.array([True, False, False])
}
With :class:`ivy.Array` instance method:
>>> x = ivy.array([[-1, -2], [0, 2]])
>>> print(x.astype(ivy.float64))
ivy.array([[-1., -2.], [0., 2.]])
With :class:`ivy.Container` instance method:
>>> x = ivy.Container(a=ivy.array([False,True,True]),
... b=ivy.array([3.14, 2.718, 1.618]))
>>> print(x.astype(ivy.int32))
{
a: ivy.array([0, 1, 1]),
b: ivy.array([3, 2, 1])
}
"""
return current_backend(x).astype(x, dtype, copy=copy, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@to_native_arrays_and_back
@handle_array_function
@handle_device
def broadcast_arrays(*arrays: Union[ivy.Array, ivy.NativeArray]) -> List[ivy.Array]:
"""Broadcasts one or more arrays against one another.
Parameters
----------
arrays
an arbitrary number of arrays to-be broadcasted.
Returns
-------
ret
A list containing broadcasted arrays of type `ivy.Array`
Each array must have the same shape, and each array must have the same
dtype as its corresponding input array.
Examples
--------
With :class:`ivy.Array` input:
>>> x1 = ivy.array([1, 2, 3])
>>> x2 = ivy.array([4, 5, 6])
>>> y = ivy.broadcast_arrays(x1, x2)
>>> print(y)
[ivy.array([1, 2, 3]), ivy.array([4, 5, 6])]
With :class:`ivy.NativeArray` inputs:
>>> x1 = ivy.native_array([0.3, 4.3])
>>> x2 = ivy.native_array([3.1, 5])
>>> x3 = ivy.native_array([2, 0])
>>> y = ivy.broadcast_arrays(x1, x2, x3)
    >>> print(y)
    [ivy.array([0.3, 4.3]), ivy.array([3.1, 5.]), ivy.array([2, 0])]
With mixed :class:`ivy.Array` and :class:`ivy.NativeArray` inputs:
>>> x1 = ivy.array([1, 2])
>>> x2 = ivy.native_array([0.3, 4.3])
>>> y = ivy.broadcast_arrays(x1, x2)
>>> print(y)
[ivy.array([1, 2]), ivy.array([0.3, 4.3])]
With :class:`ivy.Container` inputs:
>>> x1 = ivy.Container(a=ivy.array([3, 1]), b=ivy.zeros(2))
>>> x2 = ivy.Container(a=ivy.array([4, 5]), b=ivy.array([2, -1]))
>>> y = ivy.broadcast_arrays(x1, x2)
>>> print(y)
[{
a: ivy.array([3, 1]),
b: ivy.array([0., 0.])
}, {
a: ivy.array([4, 5]),
b: ivy.array([2, -1])
}]
With mixed :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> x1 = ivy.zeros(2)
>>> x2 = ivy.Container(a=ivy.array([4, 5]), b=ivy.array([2, -1]))
>>> y = ivy.broadcast_arrays(x1, x2)
>>> print(y)
[{
a: ivy.array([0., 0.]),
b: ivy.array([0., 0.])
}, {
a: ivy.array([4, 5]),
b: ivy.array([2, -1])
}]
"""
return current_backend(arrays[0]).broadcast_arrays(*arrays)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@inputs_to_native_shapes
@to_native_arrays_and_back
@handle_array_function
@handle_device
def broadcast_to(
x: Union[ivy.Array, ivy.NativeArray],
/,
shape: Tuple[int, ...],
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Broadcasts an array to a specified shape.
Parameters
----------
x
array to broadcast.
shape
array shape. Must be compatible with x (see Broadcasting). If
the array is incompatible with the specified shape, the function
should raise an exception.
out
optional output array, for writing the result to. It must have a
shape that the inputs broadcast to.
Returns
-------
ret
an array having a specified shape. Must have the same data type as x.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.broadcast_to(x, (3, 3))
>>> print(y)
ivy.array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([0.1 , 0.3])
>>> y = ivy.broadcast_to(x, (3, 2))
>>> print(y)
ivy.array([[0.1, 0.3],
[0.1, 0.3],
[0.1, 0.3]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([4, 5, 6]))
>>> y = ivy.broadcast_to(x, (3, 3))
>>> print(y)
{
a: ivy.array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]]),
b: ivy.array([[4, 5, 6],
[4, 5, 6],
[4, 5, 6]])
}
"""
return current_backend(x).broadcast_to(x, shape, out=out)
@handle_exceptions
@handle_nestable
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def can_cast(
from_: Union[ivy.Dtype, ivy.Array, ivy.NativeArray],
to: ivy.Dtype,
/,
) -> bool:
"""Determine if one data type can be cast to another data type according to
    :ref:`type-promotion` rules.
Parameters
----------
from_
input data type or array from which to cast.
to
desired data type.
Returns
-------
ret
``True`` if the cast can occur according to :ref:`type-promotion` rules;
otherwise, ``False``.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.can_cast.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Dtype` input:
>>> print(ivy.can_cast(ivy.uint8, ivy.int32))
True
>>> print(ivy.can_cast(ivy.float64, 'int64'))
False
With :class:`ivy.Array` input:
>>> x = ivy.array([1., 2., 3.])
>>> print(ivy.can_cast(x, ivy.float64))
True
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([[-1, -1, -1],
... [1, 1, 1]],
... dtype='int16')
>>> print(ivy.can_cast(x, 'uint8'))
False
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3, 4, 5]))
>>> print(ivy.can_cast(x, 'int64'))
{
a: False,
b: True
}
"""
if isinstance(from_, (ivy.Array, ivy.NativeArray)):
from_ = from_.dtype
dtype = ivy.promote_types(from_, to)
return dtype == to
@handle_exceptions
@handle_backend_invalid
@inputs_to_native_arrays
@handle_device
def finfo(
type: Union[ivy.Dtype, str, ivy.Array, ivy.NativeArray],
/,
) -> Finfo:
"""Machine limits for floating-point data types.
Parameters
----------
type
the kind of floating-point data-type about which to get information.
Returns
-------
ret
an object having the following attributes:
- **bits**: *int*
number of bits occupied by the floating-point data type.
- **eps**: *float*
difference between 1.0 and the next smallest representable floating-point
number larger than 1.0 according to the IEEE-754 standard.
- **max**: *float*
largest representable number.
- **min**: *float*
smallest representable number.
- **smallest_normal**: *float*
smallest positive floating-point number with full precision.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.finfo.html>`_
in the standard.
Examples
--------
With :class:`ivy.Dtype` input:
>>> y = ivy.finfo(ivy.float32)
>>> print(y)
finfo(resolution=1e-06, min=-3.4028235e+38, max=3.4028235e+38, dtype=float32)
With :code:`str` input:
>>> y = ivy.finfo('float32')
>>> print(y)
finfo(resolution=1e-06, min=-3.4028235e+38, max=3.4028235e+38, dtype=float32)
With :class:`ivy.Array` input:
>>> x = ivy.array([1.3,2.1,3.4], dtype=ivy.float64)
>>> print(ivy.finfo(x))
finfo(resolution=1e-15, min=-1.7976931348623157e+308, \
max=1.7976931348623157e+308, dtype=float64)
>>> x = ivy.array([0.7,8.4,3.14], dtype=ivy.float16)
>>> print(ivy.finfo(x))
finfo(resolution=0.001, min=-6.55040e+04, max=6.55040e+04, dtype=float16)
With :class:`ivy.Container` input:
>>> c = ivy.Container(x=ivy.array([-9.5,1.8,-8.9], dtype=ivy.float16),
... y=ivy.array([7.6,8.1,1.6], dtype=ivy.float64))
>>> print(ivy.finfo(c))
{
x: finfo(resolution=0.001, min=-6.55040e+04, max=6.55040e+04, dtype=float16),
y: finfo(resolution=1e-15, min=-1.7976931348623157e+308, \
max=1.7976931348623157e+308, dtype=float64)
}
"""
return current_backend(None).finfo(type)
@handle_exceptions
@handle_backend_invalid
@inputs_to_native_arrays
@handle_device
def iinfo(
type: Union[ivy.Dtype, str, ivy.Array, ivy.NativeArray],
/,
) -> Iinfo:
"""Machine limits for integer data types.
Parameters
----------
type
the kind of integer data-type about which to get information.
Returns
-------
ret
        an object having the following attributes:
- **bits**: *int*
number of bits occupied by the type.
- **max**: *int*
largest representable number.
- **min**: *int*
smallest representable number.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.iinfo.html>`_
in the standard.
Examples
--------
With :class:`ivy.Dtype` input:
>>> ivy.iinfo(ivy.int32)
iinfo(min=-2147483648, max=2147483647, dtype=int32)
With :code:`str` input:
>>> ivy.iinfo('int32')
iinfo(min=-2147483648, max=2147483647, dtype=int32)
With :class:`ivy.Array` input:
>>> x = ivy.array([13,21,34], dtype=ivy.int8)
>>> ivy.iinfo(x)
iinfo(min=-128, max=127, dtype=int8)
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([7,84,314], dtype=ivy.int64)
>>> ivy.iinfo(x)
iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64)
With :class:`ivy.Container` input:
>>> c = ivy.Container(x=ivy.array([0,1800,89], dtype=ivy.uint16),
... y=ivy.array([76,81,16], dtype=ivy.uint32))
>>> ivy.iinfo(c)
{
x: iinfo(min=0, max=65535, dtype=uint16),
y: iinfo(min=0, max=4294967295, dtype=uint32)
}
"""
return current_backend(None).iinfo(type)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@inputs_to_native_arrays
@handle_device
def result_type(
*arrays_and_dtypes: Union[ivy.Array, ivy.NativeArray, ivy.Dtype]
) -> ivy.Dtype:
"""Return the dtype that results from applying the type promotion rules
(see :ref:`type-promotion`) to the arguments.
.. note::
If provided mixed dtypes (e.g., integer and floating-point), the returned dtype
will be implementation-specific.
Parameters
----------
arrays_and_dtypes
an arbitrary number of input arrays and/or dtypes.
Returns
-------
ret
the dtype resulting from an operation involving the input arrays and dtypes.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.result_type.html>`_
in the standard.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([3, 4, 5])
>>> y = ivy.array([3., 4., 5.])
>>> d = ivy.result_type(x, y)
>>> print(d)
float32
With :class:`ivy.Dtype` input:
>>> d = ivy.result_type(ivy.uint8, ivy.uint64)
>>> print(d)
uint64
With :class:`ivy.Container` input:
>>> x = ivy.Container(a = ivy.array([3, 4, 5]))
>>> d = x.a.dtype
>>> print(d)
int32
>>> x = ivy.Container(a = ivy.array([3, 4, 5]))
>>> d = ivy.result_type(x, ivy.float64)
>>> print(d)
{
a: float64
}
"""
return current_backend(arrays_and_dtypes[0]).result_type(*arrays_and_dtypes)
# Extra #
# ------#
default_dtype_stack = []
default_float_dtype_stack = []
default_int_dtype_stack = []
default_uint_dtype_stack = []
default_complex_dtype_stack = []
class DefaultDtype:
"""Ivy's DefaultDtype class."""
def __init__(self, dtype: ivy.Dtype):
self._dtype = dtype
def __enter__(self):
set_default_dtype(self._dtype)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
unset_default_dtype()
if self and (exc_type is not None):
raise exc_val
return self
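# Usage sketch: the context-manager form keeps the default-dtype stack
# balanced even if the body raises; the sibling classes below follow the
# same pattern for the float/int/uint/complex stacks:
#
#   with DefaultDtype(ivy.float64):
#       x = ivy.array([1.0, 2.0])  # floats created here should default to float64
#   # on exit, the previous default dtype is restored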
class DefaultFloatDtype:
"""Ivy's DefaultFloatDtype class."""
def __init__(self, float_dtype: ivy.Dtype):
self._float_dtype = float_dtype
def __enter__(self):
set_default_float_dtype(self._float_dtype)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
unset_default_float_dtype()
if self and (exc_type is not None):
raise exc_val
return self
class DefaultIntDtype:
"""Ivy's DefaultIntDtype class."""
def __init__(self, int_dtype: ivy.Dtype):
self._int_dtype = int_dtype
def __enter__(self):
set_default_int_dtype(self._int_dtype)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
unset_default_int_dtype()
if self and (exc_type is not None):
raise exc_val
return self
class DefaultUintDtype:
"""Ivy's DefaultUintDtype class."""
def __init__(self, uint_dtype: ivy.UintDtype):
self._uint_dtype = uint_dtype
def __enter__(self):
set_default_uint_dtype(self._uint_dtype)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
unset_default_uint_dtype()
if self and (exc_type is not None):
raise exc_val
return self
class DefaultComplexDtype:
"""Ivy's DefaultComplexDtype class."""
def __init__(self, complex_dtype: ivy.Dtype):
self._complex_dtype = complex_dtype
def __enter__(self):
set_default_complex_dtype(self._complex_dtype)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
unset_default_complex_dtype()
if self and (exc_type is not None):
raise exc_val
return self
@handle_exceptions
def dtype_bits(dtype_in: Union[ivy.Dtype, ivy.NativeDtype, str], /) -> int:
"""Get the number of bits used for representing the input data type.
Parameters
----------
dtype_in
The data type to determine the number of bits for.
Returns
-------
ret
The number of bits used to represent the data type.
Examples
--------
With :class:`ivy.Dtype` inputs:
>>> x = ivy.dtype_bits(ivy.float32)
>>> print(x)
32
>>> x = ivy.dtype_bits('int64')
>>> print(x)
64
With :class:`ivy.NativeDtype` inputs:
>>> x = ivy.dtype_bits(ivy.native_bool)
>>> print(x)
1
"""
return current_backend(dtype_in).dtype_bits(dtype_in)
@handle_exceptions
def is_hashable_dtype(dtype_in: Union[ivy.Dtype, ivy.NativeDtype], /) -> bool:
"""Check if the given data type is hashable or not.
Parameters
----------
dtype_in
The data type to check.
Returns
-------
ret
True if data type is hashable else False
"""
# Doing something like isinstance(dtype_in, collections.abc.Hashable)
# fails where the `__hash__` method is overridden to simply raise an
# exception.
# [See `tensorflow.python.trackable.data_structures.ListWrapper`]
try:
hash(dtype_in)
return True
except TypeError:
return False
@handle_exceptions
def as_ivy_dtype(dtype_in: Union[ivy.Dtype, str], /) -> ivy.Dtype:
"""Convert native data type to string representation.
Parameters
----------
dtype_in
The data type to convert to string.
Returns
-------
ret
        data type string, e.g. 'float32'
"""
return current_backend(None).as_ivy_dtype(dtype_in)
@handle_exceptions
def as_native_dtype(dtype_in: Union[ivy.Dtype, ivy.NativeDtype], /) -> ivy.NativeDtype:
"""Convert data type string representation to native data type.
Parameters
----------
dtype_in
The data type string to convert to native data type.
Returns
-------
ret
        native data type, e.g. ``torch.float32`` when the torch backend is set.
"""
return current_backend(None).as_native_dtype(dtype_in)
def _check_float64(input) -> bool:
if ivy.is_array(input):
return ivy.dtype(input) == "float64"
if math.isfinite(input):
m, e = math.frexp(input)
return (abs(input) > 3.4028235e38) or (e < -126) or (e > 128)
return False
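# e.g. _check_float64(1e39) is True: the value is finite but exceeds the
# float32 maximum (~3.4028235e38), so float64 is required to represent it;
# _check_float64(1.5) is False, and arrays are classified by their dtype.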
def _check_complex128(input) -> bool:
if ivy.is_array(input):
return ivy.dtype(input) == "complex128"
elif isinstance(input, np.ndarray):
return str(input.dtype) == "complex128"
if hasattr(input, "real") and hasattr(input, "imag"):
return _check_float64(input.real) and _check_float64(input.imag)
return False
@handle_exceptions
def closest_valid_dtype(type: Union[ivy.Dtype, str, None], /) -> Union[ivy.Dtype, str]:
"""Determine the closest valid datatype to the datatype passed as input.
Parameters
----------
type
        The data type for which to determine the closest valid type.
Returns
-------
ret
        The closest valid data type, as an ivy.Dtype
Examples
--------
With :class:`ivy.Dtype` input:
>>> xType = ivy.float16
>>> yType = ivy.closest_valid_dtype(xType)
>>> print(yType)
float16
With :class:`ivy.NativeDtype` inputs:
>>> xType = ivy.native_uint16
>>> yType = ivy.closest_valid_dtype(xType)
>>> print(yType)
uint16
With :code:`str` input:
>>> xType = 'int32'
>>> yType = ivy.closest_valid_dtype(xType)
>>> print(yType)
int32
"""
return current_backend(type).closest_valid_dtype(type)
@handle_exceptions
@handle_nestable
@inputs_to_ivy_arrays
def default_float_dtype(
*,
input: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
float_dtype: Optional[Union[ivy.FloatDtype, ivy.NativeDtype]] = None,
as_native: bool = False,
) -> Union[ivy.Dtype, str, ivy.NativeDtype]:
"""
Parameters
----------
input
Number or array for inferring the float dtype.
float_dtype
The float dtype to be returned.
as_native
Whether to return the float dtype as native dtype.
Returns
-------
Return ``float_dtype`` as native or ivy dtype if provided, else
if ``input`` is given, return its float dtype, otherwise return the
global default float dtype.
Examples
--------
>>> ivy.default_float_dtype()
'float32'
>>> ivy.set_default_float_dtype(ivy.FloatDtype("float64"))
>>> ivy.default_float_dtype()
'float64'
>>> ivy.default_float_dtype(float_dtype=ivy.FloatDtype("float16"))
'float16'
>>> ivy.default_float_dtype(input=4294.967346)
'float32'
>>> x = ivy.array([9.8,8.9], dtype="float16")
>>> ivy.default_float_dtype(input=x)
'float16'
"""
global default_float_dtype_stack
if ivy.exists(float_dtype):
if as_native is True:
return ivy.as_native_dtype(float_dtype)
return ivy.FloatDtype(ivy.as_ivy_dtype(float_dtype))
as_native = ivy.default(as_native, False)
if ivy.exists(input):
if ivy.is_array(input):
ret = ivy.dtype(input)
elif isinstance(input, np.ndarray):
ret = str(input.dtype)
elif isinstance(input, (list, tuple, dict)):
if ivy.nested_argwhere(
input, lambda x: _check_float64(x), stop_after_n_found=1
):
ret = ivy.float64
else:
if not default_float_dtype_stack:
def_dtype = default_dtype()
if ivy.is_float_dtype(def_dtype):
ret = def_dtype
else:
ret = "float32"
else:
ret = default_float_dtype_stack[-1]
elif isinstance(input, Number):
if _check_float64(input):
ret = ivy.float64
else:
if not default_float_dtype_stack:
def_dtype = default_dtype()
if ivy.is_float_dtype(def_dtype):
ret = def_dtype
else:
ret = "float32"
else:
ret = default_float_dtype_stack[-1]
else:
if not default_float_dtype_stack:
def_dtype = default_dtype()
if ivy.is_float_dtype(def_dtype):
ret = def_dtype
else:
ret = "float32"
else:
ret = default_float_dtype_stack[-1]
if as_native:
return ivy.as_native_dtype(ret)
return ivy.FloatDtype(ivy.as_ivy_dtype(ret))
@handle_exceptions
def infer_default_dtype(
dtype: Union[ivy.Dtype, ivy.NativeDtype, str], as_native: bool = False
) -> Union[ivy.Dtype, ivy.NativeDtype]:
"""Summary.
Parameters
----------
dtype
as_native
(Default value = False)
Returns
-------
Return the default data type for the “kind” (integer or floating-point) of dtype
Examples
--------
>>> ivy.set_default_int_dtype("int32")
>>> ivy.infer_default_dtype("int8")
'int8'
>>> ivy.set_default_float_dtype("float64")
>>> ivy.infer_default_dtype("float32")
'float64'
>>> ivy.set_default_uint_dtype("uint32")
>>> x = ivy.array([0], dtype="uint64")
>>> ivy.infer_default_dtype(x.dtype)
'uint32'
"""
if ivy.is_complex_dtype(dtype):
default_dtype = ivy.default_complex_dtype(as_native=as_native)
elif ivy.is_float_dtype(dtype):
default_dtype = ivy.default_float_dtype(as_native=as_native)
elif ivy.is_uint_dtype(dtype):
default_dtype = ivy.default_uint_dtype(as_native=as_native)
elif ivy.is_int_dtype(dtype):
default_dtype = ivy.default_int_dtype(as_native=as_native)
elif as_native:
default_dtype = ivy.as_native_dtype("bool")
else:
default_dtype = ivy.as_ivy_dtype("bool")
return default_dtype
@handle_exceptions
@inputs_to_ivy_arrays
def default_dtype(
*,
dtype: Optional[Union[ivy.Dtype, str]] = None,
item: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
as_native: bool = False,
) -> Union[ivy.Dtype, ivy.NativeDtype, str]:
"""
Parameters
----------
item
Number or array for inferring the dtype.
dtype
The dtype to be returned.
as_native
Whether to return the dtype as native dtype.
Returns
-------
Return ``dtype`` as native or ivy dtype if provided, else
if ``item`` is given, return its dtype, otherwise return the
global default dtype.
Examples
--------
>>> ivy.default_dtype()
'float32'
>>> ivy.set_default_dtype(ivy.bool)
>>> ivy.default_dtype()
'bool'
>>> ivy.set_default_dtype(ivy.int16)
>>> ivy.default_dtype()
'int16'
>>> ivy.set_default_dtype(ivy.float64)
>>> ivy.default_dtype()
'float64'
>>> ivy.default_dtype(dtype="int32")
'int32'
>>> ivy.default_dtype(dtype=ivy.float16)
'float16'
>>> ivy.default_dtype(item=53.234)
'float64'
>>> ivy.default_dtype(item=[1, 2, 3])
'int32'
>>> x = ivy.array([5.2, 9.7], dtype="complex128")
>>> ivy.default_dtype(item=x)
'complex128'
"""
if ivy.exists(dtype):
if as_native is True:
return ivy.as_native_dtype(dtype)
return ivy.as_ivy_dtype(dtype)
as_native = ivy.default(as_native, False)
if ivy.exists(item):
if hasattr(item, "override_dtype_check"):
return item.override_dtype_check()
elif isinstance(item, (list, tuple, dict)) and len(item) == 0:
pass
elif ivy.is_complex_dtype(item):
return ivy.default_complex_dtype(input=item, as_native=as_native)
elif ivy.is_float_dtype(item):
return ivy.default_float_dtype(input=item, as_native=as_native)
elif ivy.is_uint_dtype(item):
            return ivy.default_uint_dtype(input=item, as_native=as_native)
elif ivy.is_int_dtype(item):
return ivy.default_int_dtype(input=item, as_native=as_native)
elif as_native:
return ivy.as_native_dtype("bool")
else:
return "bool"
global default_dtype_stack
if not default_dtype_stack:
global default_float_dtype_stack
if default_float_dtype_stack:
ret = default_float_dtype_stack[-1]
else:
ret = "float32"
else:
ret = default_dtype_stack[-1]
if as_native:
return ivy.as_native_dtype(ret)
return ivy.as_ivy_dtype(ret)
@handle_exceptions
@inputs_to_ivy_arrays
def default_int_dtype(
*,
input: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
int_dtype: Optional[Union[ivy.IntDtype, ivy.NativeDtype]] = None,
as_native: bool = False,
) -> Union[ivy.IntDtype, ivy.NativeDtype]:
"""
Parameters
----------
input
Number or array for inferring the int dtype.
int_dtype
The int dtype to be returned.
as_native
Whether to return the int dtype as native dtype.
Returns
-------
Return ``int_dtype`` as native or ivy dtype if provided, else
if ``input`` is given, return its int dtype, otherwise return the
global default int dtype.
Examples
--------
    >>> ivy.set_default_int_dtype(ivy.IntDtype("int16"))
    >>> ivy.default_int_dtype()
    'int16'
    >>> ivy.default_int_dtype(input=4294967346)
    'int64'
    >>> ivy.default_int_dtype(int_dtype=ivy.IntDtype("int8"))
'int8'
>>> x = ivy.array([9,8], dtype="int32")
>>> ivy.default_int_dtype(input=x)
'int32'
"""
global default_int_dtype_stack
if ivy.exists(int_dtype):
if as_native is True:
return ivy.as_native_dtype(int_dtype)
return ivy.IntDtype(ivy.as_ivy_dtype(int_dtype))
as_native = ivy.default(as_native, False)
if ivy.exists(input):
if ivy.is_array(input):
ret = ivy.dtype(input)
elif isinstance(input, ivy.Shape):
ret = ivy.default_int_dtype()
elif isinstance(input, np.ndarray):
ret = str(input.dtype)
elif isinstance(input, (list, tuple, dict)):
if ivy.nested_argwhere(
input,
lambda x: (
ivy.dtype(x) == "uint64"
if ivy.is_array(x)
else x > 9223372036854775807 and x != ivy.inf
),
stop_after_n_found=1,
):
ret = ivy.uint64
elif ivy.nested_argwhere(
input,
lambda x: (
ivy.dtype(x) == "int64"
if ivy.is_array(x)
else x > 2147483647 and x != ivy.inf
),
stop_after_n_found=1,
):
ret = ivy.int64
else:
if not default_int_dtype_stack:
def_dtype = ivy.default_dtype()
if ivy.is_int_dtype(def_dtype):
ret = def_dtype
else:
ret = "int32"
else:
ret = default_int_dtype_stack[-1]
elif isinstance(input, Number):
if (
input > 9223372036854775807
and input != ivy.inf
and ivy.backend != "torch"
):
ret = ivy.uint64
elif input > 2147483647 and input != ivy.inf:
ret = ivy.int64
else:
if not default_int_dtype_stack:
def_dtype = ivy.default_dtype()
if ivy.is_int_dtype(def_dtype):
ret = def_dtype
else:
ret = "int32"
else:
ret = default_int_dtype_stack[-1]
else:
if not default_int_dtype_stack:
def_dtype = ivy.default_dtype()
if ivy.is_int_dtype(def_dtype):
ret = def_dtype
else:
ret = "int32"
else:
ret = default_int_dtype_stack[-1]
if as_native:
return ivy.as_native_dtype(ret)
return ivy.IntDtype(ivy.as_ivy_dtype(ret))
@handle_exceptions
@inputs_to_ivy_arrays
def default_uint_dtype(
*,
input: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
uint_dtype: Optional[Union[ivy.UintDtype, ivy.NativeDtype]] = None,
as_native: bool = False,
) -> Union[ivy.UintDtype, ivy.NativeDtype]:
"""
Parameters
----------
input
Number or array for inferring the uint dtype.
uint_dtype
The uint dtype to be returned.
as_native
Whether to return the uint dtype as native dtype.
Returns
-------
Return ``uint_dtype`` as native or ivy dtype if provided, else
if ``input`` is given, return its uint dtype, otherwise return the
global default uint dtype.
Examples
--------
>>> ivy.set_default_uint_dtype(ivy.UintDtype("uint16"))
>>> ivy.default_uint_dtype()
'uint16'
>>> ivy.default_uint_dtype(input=4294967346)
'uint64'
>>> ivy.default_uint_dtype(uint_dtype=ivy.UintDtype("uint8"))
'uint8'
>>> x = ivy.array([9,8], dtype="uint32")
>>> ivy.default_uint_dtype(input=x)
'uint32'
"""
global default_uint_dtype_stack
if ivy.exists(uint_dtype):
if as_native is True:
return ivy.as_native_dtype(uint_dtype)
return ivy.UintDtype(ivy.as_ivy_dtype(uint_dtype))
as_native = ivy.default(as_native, False)
if ivy.exists(input):
if ivy.is_array(input):
ret = ivy.dtype(input)
elif isinstance(input, np.ndarray):
ret = input.dtype
elif isinstance(input, (list, tuple, dict)):
def is_native(x):
return ivy.is_native_array(x)
if ivy.nested_argwhere(
input,
lambda x: (
ivy.dtype(x) == "uint64"
if is_native(x)
else x > 9223372036854775807 and x != ivy.inf
),
stop_after_n_found=1,
):
ret = ivy.uint64
else:
if default_uint_dtype_stack:
ret = default_uint_dtype_stack[-1]
else:
def_dtype = ivy.default_dtype()
if ivy.is_uint_dtype(def_dtype):
ret = def_dtype
else:
ret = "uint32"
elif isinstance(input, Number):
if input > 4294967295 and input != ivy.inf and ivy.backend != "torch":
ret = ivy.uint64
else:
if default_uint_dtype_stack:
ret = default_uint_dtype_stack[-1]
else:
def_dtype = ivy.default_dtype()
if ivy.is_uint_dtype(def_dtype):
ret = def_dtype
else:
ret = "uint32"
else:
if default_uint_dtype_stack:
ret = default_uint_dtype_stack[-1]
else:
def_dtype = ivy.default_dtype()
if ivy.is_uint_dtype(def_dtype):
ret = def_dtype
else:
ret = "uint32"
if as_native:
return ivy.as_native_dtype(ret)
return ivy.UintDtype(ivy.as_ivy_dtype(ret))
@handle_exceptions
@handle_nestable
@inputs_to_ivy_arrays
@handle_device
def default_complex_dtype(
*,
input: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
complex_dtype: Optional[Union[ivy.ComplexDtype, ivy.NativeDtype]] = None,
as_native: bool = False,
) -> Union[ivy.Dtype, str, ivy.NativeDtype]:
"""
Parameters
----------
input
Number or array for inferring the complex dtype.
complex_dtype
The complex dtype to be returned.
as_native
Whether to return the complex dtype as native dtype.
Returns
-------
Return ``complex_dtype`` as native or ivy dtype if provided, else
if ``input`` is given, return its complex dtype, otherwise return the
global default complex dtype.
Examples
--------
>>> ivy.default_complex_dtype()
'complex64'
>>> ivy.set_default_complex_dtype(ivy.ComplexDtype("complex64"))
>>> ivy.default_complex_dtype()
'complex64'
>>> ivy.default_complex_dtype(complex_dtype=ivy.ComplexDtype("complex128"))
'complex128'
>>> ivy.default_complex_dtype(input=4294.967346)
'complex64'
>>> x = ivy.array([9.8,8.9], dtype="complex128")
>>> ivy.default_complex_dtype(input=x)
'complex128'
"""
global default_complex_dtype_stack
if ivy.exists(complex_dtype):
if as_native is True:
return ivy.as_native_dtype(complex_dtype)
return ivy.ComplexDtype(ivy.as_ivy_dtype(complex_dtype))
as_native = ivy.default(as_native, False)
if ivy.exists(input):
if ivy.is_array(input):
ret = ivy.dtype(input)
elif isinstance(input, np.ndarray):
ret = str(input.dtype)
elif isinstance(input, (list, tuple, dict)):
if ivy.nested_argwhere(
input, lambda x: _check_complex128(x), stop_after_n_found=1
):
ret = ivy.complex128
else:
if not default_complex_dtype_stack:
def_dtype = default_dtype()
if ivy.is_complex_dtype(def_dtype):
ret = def_dtype
else:
ret = "complex64"
else:
ret = default_complex_dtype_stack[-1]
elif isinstance(input, Number):
if _check_complex128(input):
ret = ivy.complex128
else:
if not default_complex_dtype_stack:
def_dtype = default_dtype()
if ivy.is_complex_dtype(def_dtype):
ret = def_dtype
else:
ret = "complex64"
else:
ret = default_complex_dtype_stack[-1]
else:
if not default_complex_dtype_stack:
def_dtype = default_dtype()
if ivy.is_complex_dtype(def_dtype):
ret = def_dtype
else:
ret = "complex64"
else:
ret = default_complex_dtype_stack[-1]
if as_native:
return ivy.as_native_dtype(ret)
return ivy.ComplexDtype(ivy.as_ivy_dtype(ret))
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@inputs_to_native_arrays
@handle_device
def dtype(
x: Union[ivy.Array, ivy.NativeArray], *, as_native: bool = False
) -> Union[ivy.Dtype, ivy.NativeDtype]:
"""Get the data type for input array x.
Parameters
----------
x
Tensor for which to get the data type.
as_native
Whether or not to return the dtype in string format. Default is ``False``.
Returns
-------
ret
Data type of the array.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x1 = ivy.array([1.0, 2.0, 3.5, 4.5, 5, 6])
>>> y = ivy.dtype(x1)
>>> print(y)
float32
With :class:`ivy.NativeArray` inputs:
>>> x1 = ivy.native_array([1, 0, 1, -1, 0])
>>> y = ivy.dtype(x1)
>>> print(y)
int32
With :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.native_array([1.0, 2.0, -1.0, 4.0, 1.0]),
... b=ivy.native_array([1, 0, 0, 0, 1]))
>>> y = ivy.dtype(x.a)
>>> print(y)
float32
"""
return current_backend(x).dtype(x, as_native=as_native)
@handle_exceptions
@handle_nestable
def function_supported_dtypes(fn: Callable, recurse: bool = True) -> Union[Tuple, dict]:
"""Return the supported data types of the current backend's function. The
function returns a dict containing the supported dtypes for the
compositional and primary implementations in case of partial mixed
functions.
Parameters
----------
fn
The function to check for the supported dtype attribute
recurse
Whether to recurse into used ivy functions. Default is ``True``.
Returns
-------
ret
Tuple or dict containing the supported dtypes of the function
Examples
--------
>>> print(ivy.function_supported_dtypes(ivy.acosh))
('bool', 'float64', 'int64', 'uint8', 'int8', 'float32', 'int32', 'int16', \
'bfloat16')
"""
ivy.utils.assertions.check_true(
_is_valid_dtypes_attributes(fn),
"supported_dtypes and unsupported_dtypes attributes cannot both exist "
"in a particular backend",
)
if hasattr(fn, "partial_mixed_handler"):
return {
"compositional": function_supported_dtypes(fn.compos, recurse=recurse),
"primary": _get_dtypes(fn, complement=False),
}
else:
supported_dtypes = set(_get_dtypes(fn, complement=False))
if recurse:
supported_dtypes = _nested_get(
fn, supported_dtypes, set.intersection, function_supported_dtypes
)
return (
supported_dtypes
if isinstance(supported_dtypes, dict)
else tuple(supported_dtypes)
)
@handle_exceptions
@handle_nestable
def function_unsupported_dtypes(
fn: Callable, recurse: bool = True
) -> Union[Tuple, dict]:
"""Return the unsupported data types of the current backend's function. The
function returns a dict containing the unsupported dtypes for the
compositional and primary implementations in case of partial mixed
functions.
Parameters
----------
fn
The function to check for the unsupported dtype attribute
recurse
Whether to recurse into used ivy functions. Default is ``True``.
Returns
-------
ret
Tuple or dict containing the unsupported dtypes of the function
Examples
--------
>>> ivy.set_backend('torch')
>>> print(ivy.function_unsupported_dtypes(ivy.acosh))
    ('float16', 'uint16', 'uint32', 'uint64')
"""
ivy.utils.assertions.check_true(
_is_valid_dtypes_attributes(fn),
"supported_dtypes and unsupported_dtypes attributes cannot both exist "
"in a particular backend",
)
if hasattr(fn, "partial_mixed_handler"):
return {
"compositional": function_unsupported_dtypes(fn.compos, recurse=recurse),
"primary": _get_dtypes(fn, complement=True),
}
else:
unsupported_dtypes = set(_get_dtypes(fn, complement=True))
if recurse:
unsupported_dtypes = _nested_get(
fn, unsupported_dtypes, set.union, function_unsupported_dtypes
)
return (
unsupported_dtypes
if isinstance(unsupported_dtypes, dict)
else tuple(unsupported_dtypes)
)
@handle_exceptions
def invalid_dtype(dtype_in: Union[ivy.Dtype, ivy.NativeDtype, str, None], /) -> bool:
"""Determine whether the provided data type is not support by the current
framework.
Parameters
----------
dtype_in
The data type for which to check for backend non-support
Returns
-------
ret
        Boolean, whether the data-type string is unsupported.
Examples
--------
>>> print(ivy.invalid_dtype(None))
False
>>> print(ivy.invalid_dtype("uint64"))
False
>>> print(ivy.invalid_dtype(ivy.float64))
False
>>> print(ivy.invalid_dtype(ivy.native_uint8))
False
"""
if dtype_in is None:
return False
return ivy.as_ivy_dtype(dtype_in) in ivy.invalid_dtypes
@handle_exceptions
@handle_nestable
@inputs_to_native_arrays
def is_bool_dtype(
dtype_in: Union[ivy.Dtype, str, ivy.Array, ivy.NativeArray, Number],
/,
) -> bool:
"""Determine whether the input data type is a bool data type.
Parameters
----------
dtype_in
input data type to test.
Returns
-------
ret
"True" if the input data type is a bool, otherwise "False".
    Both the description and the type hints above assume an array input for
    simplicity, but this function is *nestable*, and therefore also accepts
:class:`ivy.Container` instances in place of any of the arguments.
"""
if ivy.is_array(dtype_in):
dtype_in = ivy.dtype(dtype_in)
elif isinstance(dtype_in, np.ndarray):
return "bool" in dtype_in.dtype.name
elif isinstance(dtype_in, Number):
        return isinstance(dtype_in, (bool, np.bool_))
elif isinstance(dtype_in, (list, tuple, dict)):
return bool(
ivy.nested_argwhere(
dtype_in,
                lambda x: isinstance(x, (bool, np.bool_)),
)
)
return "bool" in ivy.as_ivy_dtype(dtype_in)
@handle_exceptions
@handle_nestable
@inputs_to_native_arrays
def is_int_dtype(
dtype_in: Union[ivy.Dtype, str, ivy.Array, ivy.NativeArray, Number],
/,
) -> bool:
"""Determine whether the input data type is an int data type.
Parameters
----------
dtype_in
input data type to test.
Returns
-------
ret
"True" if the input data type is an integer, otherwise "False".
    Both the description and the type hints above assume an array input for
    simplicity, but this function is *nestable*, and therefore also accepts
:class:`ivy.Container` instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Dtype` input:
>>> x = ivy.is_int_dtype(ivy.float64)
>>> print(x)
False
With :class:`ivy.Array` input:
>>> x = ivy.array([1., 2., 3.])
>>> print(ivy.is_int_dtype(x), x.dtype)
False float32
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([[-1, -1, -1], [1, 1, 1]], dtype=ivy.int16)
>>> print(ivy.is_int_dtype(x))
True
With :code:`Number` input:
>>> x = 1
>>> print(ivy.is_int_dtype(x))
True
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),b=ivy.array([3, 4, 5]))
>>> print(ivy.is_int_dtype(x))
{
a: False,
b: True
}
"""
if ivy.is_array(dtype_in):
dtype_in = ivy.dtype(dtype_in)
elif isinstance(dtype_in, ivy.Shape):
dtype_in = ivy.default_int_dtype()
elif isinstance(dtype_in, np.ndarray):
return "int" in dtype_in.dtype.name
elif isinstance(dtype_in, Number):
return isinstance(dtype_in, (int, np.integer)) and not isinstance(
dtype_in, bool
)
elif isinstance(dtype_in, (list, tuple, dict)):
def nested_fun(x):
return (
isinstance(x, (int, np.integer))
or (ivy.is_array(x) and "int" in ivy.dtype(x))
            ) and not isinstance(x, bool)
return bool(ivy.nested_argwhere(dtype_in, nested_fun))
return "int" in ivy.as_ivy_dtype(dtype_in)
@handle_exceptions
def check_float(x: Any) -> bool:
"""Check if the input is a float or a float-like object.
Parameters
----------
x
Input to check.
Returns
-------
ret
"True" if the input is a float or a float-like object, otherwise "False".
"""
    return isinstance(x, (int, float)) and not isinstance(x, bool)
@handle_exceptions
@handle_nestable
@inputs_to_native_arrays
def is_float_dtype(
dtype_in: Union[ivy.Dtype, str, ivy.Array, ivy.NativeArray, Number],
/,
) -> bool:
"""Determine whether the input data type is a float dtype.
Parameters
----------
dtype_in
The array or data type to check
Returns
-------
ret
Whether or not the array or data type is of a floating point dtype
Examples
--------
>>> x = ivy.is_float_dtype(ivy.float32)
>>> print(x)
True
>>> arr = ivy.array([1.2, 3.2, 4.3], dtype=ivy.float32)
>>> print(ivy.is_float_dtype(arr))
True
"""
if ivy.is_array(dtype_in):
dtype_in = ivy.dtype(dtype_in)
elif isinstance(dtype_in, ivy.Shape):
dtype_in = ivy.default_int_dtype()
elif isinstance(dtype_in, np.ndarray):
return "float" in dtype_in.dtype.name
elif isinstance(dtype_in, Number):
return isinstance(dtype_in, (float, np.floating))
elif isinstance(dtype_in, (list, tuple, dict)):
return bool(
ivy.nested_argwhere(
dtype_in,
lambda x: isinstance(x, (float, np.floating))
or (ivy.is_array(x) and "float" in ivy.dtype(x)),
)
)
return "float" in as_ivy_dtype(dtype_in)
@handle_exceptions
@handle_nestable
@inputs_to_native_arrays
def is_uint_dtype(
dtype_in: Union[ivy.Dtype, str, ivy.Array, ivy.NativeArray, Number],
/,
) -> bool:
"""Determine whether the input data type is a uint dtype.
Parameters
----------
dtype_in
The array or data type to check
Returns
-------
ret
Whether or not the array or data type is of a uint dtype
Examples
--------
>>> ivy.is_uint_dtype(ivy.UintDtype("uint16"))
True
>>> ivy.is_uint_dtype(ivy.Dtype("uint8"))
True
>>> ivy.is_uint_dtype(ivy.IntDtype("int64"))
False
"""
if ivy.is_array(dtype_in):
dtype_in = ivy.dtype(dtype_in)
elif isinstance(dtype_in, ivy.Shape):
dtype_in = ivy.default_int_dtype()
elif isinstance(dtype_in, np.ndarray):
return "uint" in dtype_in.dtype.name
elif isinstance(dtype_in, Number):
return isinstance(dtype_in, np.unsignedinteger)
elif isinstance(dtype_in, (list, tuple, dict)):
return ivy.nested_argwhere(
dtype_in,
lambda x: isinstance(x, np.unsignedinteger)
or (ivy.is_array(x) and "uint" in ivy.dtype(x)),
)
return "uint" in as_ivy_dtype(dtype_in)
@handle_exceptions
@handle_nestable
@inputs_to_ivy_arrays
def is_complex_dtype(
dtype_in: Union[ivy.Dtype, str, ivy.Array, ivy.NativeArray, Number],
/,
) -> bool:
"""Determine whether the input data type is a complex dtype.
Parameters
----------
dtype_in
The array or data type to check
Returns
-------
ret
Whether or not the array or data type is of a complex dtype
Examples
--------
>>> ivy.is_complex_dtype(ivy.ComplexDtype("complex64"))
True
>>> ivy.is_complex_dtype(ivy.Dtype("complex128"))
True
>>> ivy.is_complex_dtype(ivy.IntDtype("int64"))
False
"""
if ivy.is_array(dtype_in):
dtype_in = ivy.dtype(dtype_in)
elif isinstance(dtype_in, ivy.Shape):
dtype_in = ivy.default_int_dtype()
elif isinstance(dtype_in, np.ndarray):
return "complex" in dtype_in.dtype.name
elif isinstance(dtype_in, Number):
return isinstance(dtype_in, (complex, np.complexfloating))
elif isinstance(dtype_in, (list, tuple, dict)):
return ivy.nested_argwhere(
dtype_in,
lambda x: isinstance(x, (complex, np.complexfloating))
or (ivy.is_array(x) and "complex" in ivy.dtype(x)),
)
return "complex" in as_ivy_dtype(dtype_in)
@handle_exceptions
def promote_types(
type1: Union[ivy.Dtype, ivy.NativeDtype],
type2: Union[ivy.Dtype, ivy.NativeDtype],
/,
*,
array_api_promotion: bool = False,
) -> ivy.Dtype:
"""Promote the datatypes type1 and type2, returning the data type they
promote to.
Parameters
----------
type1
the first of the two types to promote
type2
the second of the two types to promote
array_api_promotion
whether to only use the array api promotion rules
Returns
-------
ret
The type that both input types promote to
"""
# in case either is of none type
if not (type1 and type2):
return type1 if type1 else type2
query = [ivy.as_ivy_dtype(type1), ivy.as_ivy_dtype(type2)]
query = tuple(query)
if query not in ivy.promotion_table:
query = (query[1], query[0])
def _promote(query):
if array_api_promotion:
return ivy.array_api_promotion_table[query]
return ivy.promotion_table[query]
return _promote(query)
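# Illustrative examples (results follow the standard promotion table):
#
#   >>> print(ivy.promote_types(ivy.uint8, ivy.int32))
#   int32
#   >>> print(ivy.promote_types("float16", "float32"))
#   float32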
@handle_exceptions
def set_default_dtype(dtype: Union[ivy.Dtype, ivy.NativeDtype, str], /):
"""Set the datatype `dtype` as default data type.
Parameters
----------
dtype
the data_type to set as default data type
Examples
--------
With :class:`ivy.Dtype` input:
>>> ivy.set_default_dtype(ivy.bool)
>>> ivy.default_dtype_stack
['bool']
>>> ivy.unset_default_dtype()
>>> ivy.set_default_dtype("float64")
>>> ivy.default_dtype_stack
['float64']
>>> ivy.unset_default_dtype()
With :class:`ivy.NativeDtype` input:
>>> ivy.set_default_dtype(ivy.native_uint64)
>>> ivy.default_dtype_stack
['uint64']
"""
dtype = ivy.as_ivy_dtype(dtype)
ivy.utils.assertions._check_jax_x64_flag(dtype)
global default_dtype_stack
default_dtype_stack.append(dtype)
@handle_exceptions
def set_default_float_dtype(float_dtype: Union[ivy.Dtype, str], /):
"""Set the 'float_dtype' as the default data type.
Parameters
----------
float_dtype
The float data type to be set as the default.
Examples
--------
    With :class:`ivy.Dtype` input:
    >>> ivy.set_default_float_dtype(ivy.FloatDtype("float64"))
    >>> ivy.default_float_dtype()
    'float64'
    >>> ivy.set_default_float_dtype(ivy.FloatDtype("float32"))
>>> ivy.default_float_dtype()
'float32'
"""
float_dtype = ivy.FloatDtype(ivy.as_ivy_dtype(float_dtype))
ivy.utils.assertions._check_jax_x64_flag(float_dtype)
global default_float_dtype_stack
default_float_dtype_stack.append(float_dtype)
@handle_exceptions
def set_default_int_dtype(int_dtype: Union[ivy.Dtype, str], /):
"""Set the 'int_dtype' as the default data type.
Parameters
----------
int_dtype
The integer data type to be set as the default.
Examples
--------
With :class: `ivy.Dtype` input:
>>> ivy.set_default_int_dtype(ivy.intDtype("int64"))
>>> ivy.default_int_dtype()
'int64'
>>> ivy.set_default_int_dtype(ivy.intDtype("int32"))
>>> ivy.default_int_dtype()
'int32'
"""
int_dtype = ivy.IntDtype(ivy.as_ivy_dtype(int_dtype))
ivy.utils.assertions._check_jax_x64_flag(int_dtype)
global default_int_dtype_stack
default_int_dtype_stack.append(int_dtype)
@handle_exceptions
def set_default_uint_dtype(uint_dtype: Union[ivy.Dtype, str], /):
"""Set the uint dtype to be default.
Parameters
----------
uint_dtype
The uint dtype to be set as default.
Examples
--------
>>> ivy.set_default_uint_dtype(ivy.UintDtype("uint8"))
>>> ivy.default_uint_dtype()
'uint8'
>>> ivy.set_default_uint_dtype(ivy.UintDtype("uint64"))
>>> ivy.default_uint_dtype()
'uint64'
"""
uint_dtype = ivy.UintDtype(ivy.as_ivy_dtype(uint_dtype))
ivy.utils.assertions._check_jax_x64_flag(uint_dtype)
global default_uint_dtype_stack
default_uint_dtype_stack.append(uint_dtype)
@handle_exceptions
def set_default_complex_dtype(complex_dtype: Union[ivy.Dtype, str], /):
"""Set the 'complex_dtype' as the default data type.
Parameters
----------
complex_dtype
The complex data type to be set as the default.
Examples
--------
With :class: `ivy.Dtype` input:
>>> ivy.set_default_complex_dtype(ivy.ComplexDtype("complex64"))
>>> ivy.default_complex_dtype()
'complex64'
>>> ivy.set_default_complex_dtype(ivy.ComplexDtype("complex128"))
>>> ivy.default_complex_dtype()
'complex128'
"""
complex_dtype = ivy.ComplexDtype(ivy.as_ivy_dtype(complex_dtype))
ivy.utils.assertions._check_jax_x64_flag(complex_dtype)
global default_complex_dtype_stack
default_complex_dtype_stack.append(complex_dtype)
@handle_exceptions
def type_promote_arrays(
x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray],
/,
) -> Tuple:
"""Type promote the input arrays, returning new arrays with the shared
correct data type.
Parameters
----------
x1
the first of the two arrays to type promote
x2
the second of the two arrays to type promote
Returns
-------
ret1, ret2
The input arrays after type promotion
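Examples
--------
A minimal sketch (under the standard promotion table, int8 with int32
promotes to int32):
>>> x = ivy.array([1, 2], dtype=ivy.int8)
>>> y = ivy.array([3, 4], dtype=ivy.int32)
>>> x2, y2 = ivy.type_promote_arrays(x, y)
>>> print(x2.dtype, y2.dtype)
int32 int32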
"""
new_type = ivy.promote_types(ivy.dtype(x1), ivy.dtype(x2))
return ivy.astype(x1, new_type), ivy.astype(x2, new_type)
@handle_exceptions
def unset_default_dtype():
"""Reset the current default dtype to the previous state.
Examples
--------
>>> ivy.set_default_dtype(ivy.int32)
>>> ivy.set_default_dtype(ivy.bool)
>>> ivy.default_dtype_stack
['int32', 'bool']
>>> ivy.unset_default_dtype()
>>> ivy.default_dtype_stack
['int32']
>>> ivy.unset_default_dtype()
>>> ivy.default_dtype_stack
[]
"""
global default_dtype_stack
if default_dtype_stack:
default_dtype_stack.pop(-1)
@handle_exceptions
def unset_default_float_dtype():
"""Reset the current default float dtype to the previous state.
Examples
--------
>>> ivy.set_default_float_dtype(ivy.float32)
>>> ivy.set_default_float_dtype(ivy.float64)
>>> ivy.default_float_dtype_stack
['float32','float64']
>>> ivy.unset_default_float_dtype()
>>> ivy.default_float_dtype_stack
['float32']
"""
global default_float_dtype_stack
if default_float_dtype_stack:
default_float_dtype_stack.pop(-1)
@handle_exceptions
def unset_default_int_dtype():
"""Reset the current default int dtype to the previous state.
Examples
--------
>>> ivy.set_default_int_dtype(ivy.intDtype("int16"))
>>> ivy.default_int_dtype()
'int16'
>>> ivy.unset_default_int_dtype()
>>> ivy.default_int_dtype()
'int32'
"""
global default_int_dtype_stack
if default_int_dtype_stack:
default_int_dtype_stack.pop(-1)
@handle_exceptions
def unset_default_uint_dtype():
"""Reset the current default uint dtype to the previous state.
Examples
--------
>>> ivy.set_default_uint_dtype(ivy.UintDtype("uint8"))
>>> ivy.default_uint_dtype()
'uint8'
>>> ivy.unset_default_uint_dtype()
>>> ivy.default_uint_dtype()
'uint32'
"""
global default_uint_dtype_stack
if default_uint_dtype_stack:
default_uint_dtype_stack.pop(-1)
@handle_exceptions
def unset_default_complex_dtype():
"""Reset the current default complex dtype to the previous state.
Examples
--------
>>> ivy.set_default_complex_dtype(ivy.complex64)
>>> ivy.set_default_complex_dtype(ivy.complex128)
>>> ivy.default_complex_dtype_stack
['complex64','complex128']
>>> ivy.unset_default_complex_dtype()
>>> ivy.default_complex_dtype_stack
['complex64']
"""
global default_complex_dtype_stack
if default_complex_dtype_stack:
default_complex_dtype_stack.pop(-1)
@handle_exceptions
def valid_dtype(dtype_in: Union[ivy.Dtype, ivy.NativeDtype, str, None], /) -> bool:
"""Determine whether the provided data type is supported by the current
framework.
Parameters
----------
dtype_in
The data type for which to check for backend support
Returns
-------
ret
Boolean, whether or not the data-type string is supported.
Examples
--------
>>> print(ivy.valid_dtype(None))
True
>>> print(ivy.valid_dtype(ivy.float64))
True
>>> print(ivy.valid_dtype('bool'))
True
>>> print(ivy.valid_dtype(ivy.native_float16))
True
"""
if dtype_in is None:
return True
return ivy.as_ivy_dtype(dtype_in) in ivy.valid_dtypes
@handle_exceptions
def promote_types_of_inputs(
x1: Union[ivy.NativeArray, Number, Iterable[Number]],
x2: Union[ivy.NativeArray, Number, Iterable[Number]],
/,
*,
array_api_promotion: bool = False,
) -> Tuple[ivy.NativeArray, ivy.NativeArray]:
"""Promote the dtype of the given native array inputs to a common dtype
based on type promotion rules.
Note that when float or integer values, or any other non-array inputs,
are passed to this function, the returned values will be array-like
objects. Outputs from this function should therefore only be used as
inputs to functions that expect array-like or tensor-like objects;
otherwise the results may be unexpected.
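Examples
--------
A minimal sketch; a float scalar paired with an integer array is given a
float64 dtype before promotion, and the returned values are native arrays:
>>> a, b = ivy.promote_types_of_inputs(ivy.native_array([1, 2]), 3.5)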
"""
def _special_case(a1, a2):
# check for float number and integer array case
return isinstance(a1, float) and "int" in str(a2.dtype)
def _get_target_dtype(scalar, arr):
# identify a good dtype to give the scalar value,
# based on its own type and that of the arr value
if _special_case(scalar, arr):
return "float64"
elif arr.dtype == bool and not isinstance(scalar, bool):
return None # let ivy infer a dtype
elif isinstance(scalar, complex) and not ivy.is_complex_dtype(arr):
return "complex128"
else:
return arr.dtype
if hasattr(x1, "dtype") and not hasattr(x2, "dtype"):
device = ivy.default_device(item=x1, as_native=True)
x2 = ivy.asarray(x2, dtype=_get_target_dtype(x2, x1), device=device)
elif hasattr(x2, "dtype") and not hasattr(x1, "dtype"):
device = ivy.default_device(item=x2, as_native=True)
x1 = ivy.asarray(x1, dtype=_get_target_dtype(x1, x2), device=device)
elif not (hasattr(x1, "dtype") or hasattr(x2, "dtype")):
x1 = ivy.asarray(x1)
x2 = ivy.asarray(x2)
if x1.dtype != x2.dtype:
promoted = promote_types(
x1.dtype, x2.dtype, array_api_promotion=array_api_promotion
)
x1 = ivy.astype(x1, promoted, copy=False)
x2 = ivy.astype(x2, promoted, copy=False)
ivy.utils.assertions._check_jax_x64_flag(x1.dtype)
return ivy.to_native(x1), ivy.to_native(x2)
@handle_exceptions
def is_native_dtype(dtype_in: Union[ivy.Dtype, ivy.NativeDtype], /) -> bool:
"""Determine whether the input dtype is a Native dtype.
Parameters
----------
dtype_in
Determine whether the input data type is a native data type object.
Returns
-------
ret
Boolean, whether or not dtype_in is a native data type.
Examples
--------
>>> ivy.set_backend('numpy')
>>> ivy.is_native_dtype(np.int32)
True
>>> ivy.set_backend('numpy')
>>> ivy.is_native_dtype(ivy.float64)
False
"""
return current_backend(None).is_native_dtype(dtype_in)
| ivy/ivy/functional/ivy/data_type.py/0 | {
"file_path": "ivy/ivy/functional/ivy/data_type.py",
"repo_id": "ivy",
"token_count": 33761
} | 47 |
# global
import ivy
from ivy.func_wrapper import handle_array_function
from ivy.functional.ivy.gradients import gradient_descent_update
from ivy.utils.exceptions import handle_exceptions
# local
from typing import Optional, Union, Callable, Tuple, Any
# Extra #
# ------#
# Private #
def _compute_cost_and_update_grads(
cost_fn,
order,
batch,
variables,
outer_v,
keep_outer_v,
average_across_steps_or_final,
all_grads,
unique_outer,
batched,
num_tasks,
):
"""Compute cost and update gradients.
This function computes the cost and updates gradients for optimization.
Parameters
----------
cost_fn : function
The cost function.
order : int
The order of computation.
batch : object
The batch data.
variables : ivy.Container
The variables for optimization.
outer_v : object
Outer variable.
keep_outer_v : bool
Whether to keep outer variable.
average_across_steps_or_final : bool
Whether to average across steps or final.
all_grads : list
List to accumulate gradients.
unique_outer : bool
Whether outer variables are unique.
batched : bool
Whether the data is batched.
num_tasks : int
Number of tasks.
Returns
-------
object
The computed cost.
Examples
--------
>>> # Example usage here
>>> pass
"""
if order == 1:
def cost_fn_with_variable(v):
return cost_fn(
batch, v=variables.cont_set_at_key_chains(v) if unique_outer else v
)
cost, inner_grads = ivy.execute_with_gradients(
cost_fn_with_variable,
(
variables.cont_at_key_chains(outer_v, ignore_none=True)
if keep_outer_v
else variables.cont_prune_key_chains(outer_v, ignore_none=True)
),
retain_grads=False,
)
var = (
variables.cont_at_key_chains(outer_v, ignore_none=True)
if keep_outer_v
else variables.cont_prune_key_chains(outer_v, ignore_none=True)
)
inner_grads = ivy.Container(
{
k: ivy.zeros_like(v) if k not in inner_grads else inner_grads[k]
for k, v in var.cont_to_iterator()
}
)
if batched:
inner_grads = ivy.multiply(inner_grads, num_tasks)
if average_across_steps_or_final:
all_grads.append(inner_grads)
else:
cost = cost_fn(batch, v=variables)
return cost
def _train_task(
inner_batch,
outer_batch,
inner_cost_fn,
outer_cost_fn,
variables,
inner_grad_steps,
inner_learning_rate,
inner_optimization_step,
order,
average_across_steps,
inner_v,
keep_inner_v,
outer_v,
keep_outer_v,
batched,
num_tasks,
stop_gradients,
):
# init
total_cost = 0
all_grads = []
# inner and outer
unique_inner = inner_v is not None
unique_outer = outer_v is not None
# iterate through inner loop training steps
for i in range(inner_grad_steps):
# compute inner gradients to update the inner variables
cost, inner_update_grads = ivy.execute_with_gradients(
lambda v: inner_cost_fn(
inner_batch,
v=variables.cont_set_at_key_chains(v) if unique_inner else v,
),
(
variables.cont_at_key_chains(inner_v, ignore_none=True)
if keep_inner_v
else variables.cont_prune_key_chains(inner_v, ignore_none=True)
),
retain_grads=order > 1,
)
var = (
variables.cont_at_key_chains(inner_v, ignore_none=True)
if keep_inner_v
else variables.cont_prune_key_chains(inner_v, ignore_none=True)
)
inner_update_grads = ivy.Container(
{
k: (
ivy.zeros_like(v)
if k not in inner_update_grads
else inner_update_grads[k]
)
for k, v in var.cont_to_iterator()
}
)
if batched:
inner_update_grads = ivy.multiply(inner_update_grads, num_tasks)
# compute the cost to be optimized, and update all_grads if first order method
if outer_cost_fn is None and not unique_inner and not unique_outer:
all_grads.append(inner_update_grads)
else:
cost = _compute_cost_and_update_grads(
inner_cost_fn if outer_cost_fn is None else outer_cost_fn,
order,
outer_batch,
variables,
outer_v,
keep_outer_v,
average_across_steps,
all_grads,
unique_outer,
batched,
num_tasks,
)
# update cost and update parameters
total_cost = total_cost + cost
if unique_inner:
variables = variables.cont_set_at_key_chains(
inner_optimization_step(
(
variables.cont_at_key_chains(inner_v)
if keep_inner_v
else variables.cont_prune_key_chains(inner_v)
),
inner_update_grads,
inner_learning_rate,
stop_gradients=stop_gradients,
)
)
else:
variables = inner_optimization_step(
variables,
inner_update_grads,
inner_learning_rate,
stop_gradients=stop_gradients,
)
# once training is finished, compute the final cost, and update
# all_grads if first order method
final_cost = _compute_cost_and_update_grads(
inner_cost_fn if outer_cost_fn is None else outer_cost_fn,
order,
outer_batch,
variables,
outer_v,
keep_outer_v,
True,
all_grads,
unique_outer,
batched,
num_tasks,
)
# update variables
if stop_gradients:
variables = variables.stop_gradient()
if not batched:
variables = variables.expand_dims(axis=0)
# average the cost or gradients across all timesteps if this option is chosen
if average_across_steps:
total_cost = total_cost + final_cost
if order == 1:
all_grads = sum(all_grads) / max(len(all_grads), 1)
return total_cost / (inner_grad_steps + 1), variables, all_grads
# else return only the final values
if order == 1:
all_grads = all_grads[-1]
return final_cost, variables, all_grads
def _train_tasks_batched(
batch,
inner_batch_fn,
outer_batch_fn,
inner_cost_fn,
outer_cost_fn,
variables,
inner_grad_steps,
inner_learning_rate,
inner_optimization_step,
order,
average_across_steps,
inner_v,
keep_inner_v,
outer_v,
keep_outer_v,
return_inner_v,
num_tasks,
stop_gradients,
):
inner_batch = batch
outer_batch = batch
if inner_batch_fn is not None:
inner_batch = inner_batch_fn(inner_batch)
if outer_batch_fn is not None:
outer_batch = outer_batch_fn(outer_batch)
cost, updated_ivs, grads = _train_task(
inner_batch,
outer_batch,
inner_cost_fn,
outer_cost_fn,
variables,
inner_grad_steps,
inner_learning_rate,
inner_optimization_step,
order,
average_across_steps,
inner_v,
keep_inner_v,
outer_v,
keep_outer_v,
True,
num_tasks,
stop_gradients,
)
grads = grads.mean(axis=0) if isinstance(grads, ivy.Container) else grads
if order == 1:
if return_inner_v in ["all", True]:
return cost, grads, updated_ivs
elif return_inner_v == "first":
return cost, grads, updated_ivs[0:1]
return cost, grads
if return_inner_v in ["all", True]:
return cost, updated_ivs
elif return_inner_v == "first":
return cost, updated_ivs[0:1]
return cost
def _train_tasks_with_for_loop(
batch,
inner_sub_batch_fn,
outer_sub_batch_fn,
inner_cost_fn,
outer_cost_fn,
variables,
inner_grad_steps,
inner_learning_rate,
inner_optimization_step,
order,
average_across_steps,
inner_v,
keep_inner_v,
outer_v,
keep_outer_v,
return_inner_v,
num_tasks,
stop_gradients,
):
total_cost = 0
updated_ivs_to_return = []
all_grads = []
if isinstance(inner_v, (list, tuple)) and isinstance(
inner_v[0], (list, tuple, dict, type(None))
):
inner_v_seq = True
else:
inner_v_seq = False
if isinstance(outer_v, (list, tuple)) and isinstance(
outer_v[0], (list, tuple, dict, type(None))
):
outer_v_seq = True
else:
outer_v_seq = False
for i, sub_batch in enumerate(batch.cont_unstack_conts(0, True, num_tasks)):
if inner_sub_batch_fn is not None:
inner_sub_batch = inner_sub_batch_fn(sub_batch)
else:
inner_sub_batch = sub_batch
if outer_sub_batch_fn is not None:
outer_sub_batch = outer_sub_batch_fn(sub_batch)
else:
outer_sub_batch = sub_batch
iv = inner_v[i] if inner_v_seq else inner_v
ov = outer_v[i] if outer_v_seq else outer_v
cost, updated_iv, grads = _train_task(
inner_sub_batch,
outer_sub_batch,
inner_cost_fn,
outer_cost_fn,
variables,
inner_grad_steps,
inner_learning_rate,
inner_optimization_step,
order,
average_across_steps,
iv,
keep_inner_v,
ov,
keep_outer_v,
False,
num_tasks,
stop_gradients,
)
if (return_inner_v == "first" and i == 0) or return_inner_v in ["all", True]:
updated_ivs_to_return.append(updated_iv)
total_cost = total_cost + cost
all_grads.append(grads)
if order == 1:
if return_inner_v:
return (
total_cost / num_tasks,
sum(all_grads) / num_tasks,
ivy.concat(updated_ivs_to_return, axis=0),
)
return total_cost / num_tasks, sum(all_grads) / num_tasks
if return_inner_v:
return total_cost / num_tasks, ivy.concat(updated_ivs_to_return, axis=0)
return total_cost / num_tasks
def _train_tasks(
batch,
inner_batch_fn,
outer_batch_fn,
inner_cost_fn,
outer_cost_fn,
variables,
inner_grad_steps,
inner_learning_rate,
inner_optimization_step,
order,
average_across_steps,
batched,
inner_v,
keep_inner_v,
outer_v,
keep_outer_v,
return_inner_v,
num_tasks,
stop_gradients,
):
if batched:
return _train_tasks_batched(
batch,
inner_batch_fn,
outer_batch_fn,
inner_cost_fn,
outer_cost_fn,
variables,
inner_grad_steps,
inner_learning_rate,
inner_optimization_step,
order,
average_across_steps,
inner_v,
keep_inner_v,
outer_v,
keep_outer_v,
return_inner_v,
num_tasks,
stop_gradients,
)
return _train_tasks_with_for_loop(
batch,
inner_batch_fn,
outer_batch_fn,
inner_cost_fn,
outer_cost_fn,
variables,
inner_grad_steps,
inner_learning_rate,
inner_optimization_step,
order,
average_across_steps,
inner_v,
keep_inner_v,
outer_v,
keep_outer_v,
return_inner_v,
num_tasks,
stop_gradients,
)
# Public #
# First Order
@handle_exceptions
@handle_array_function
def fomaml_step(
batch: ivy.Container,
inner_cost_fn: Callable,
outer_cost_fn: Callable,
variables: ivy.Container,
inner_grad_steps: int,
inner_learning_rate: float,
/,
*,
inner_optimization_step: Callable = gradient_descent_update,
inner_batch_fn: Optional[Callable] = None,
outer_batch_fn: Optional[Callable] = None,
average_across_steps: bool = False,
batched: bool = True,
inner_v: Optional[ivy.Container] = None,
keep_inner_v: bool = True,
outer_v: Optional[ivy.Container] = None,
keep_outer_v: bool = True,
return_inner_v: Union[str, bool] = False,
num_tasks: Optional[int] = None,
stop_gradients: bool = True,
) -> Tuple[ivy.Array, ivy.Container, Any]:
"""Perform step of first order MAML.
Parameters
----------
batch
The input batch
inner_cost_fn
callable for the inner loop cost function, receiving sub-batch, inner vars and
outer vars
outer_cost_fn
callable for the outer loop cost function, receiving task-specific sub-batch,
inner vars and outer vars. If None, the cost from the inner loop will also be
optimized in the outer loop.
variables
Variables to be optimized during the meta step
inner_grad_steps
Number of gradient steps to perform during the inner loop.
inner_learning_rate
The learning rate of the inner loop.
inner_optimization_step
The function used for the inner loop optimization.
Default is ivy.gradient_descent_update.
inner_batch_fn
Function to apply to the task sub-batch, before passing to the inner_cost_fn.
Default is ``None``.
outer_batch_fn
Function to apply to the task sub-batch, before passing to the outer_cost_fn.
Default is ``None``.
average_across_steps
Whether to average the inner loop steps for the outer loop update.
Default is ``False``.
batched
Whether to batch along the time dimension, and run the meta steps in batch.
Default is ``True``.
inner_v
Nested variable keys to be optimized during the inner loop, with same keys and
boolean values. (Default value = None)
keep_inner_v
If True, the key chains in inner_v will be kept, otherwise they will be removed.
Default is ``True``.
outer_v
Nested variable keys to be optimized during the outer loop, with same keys and
boolean values.
(Default value = None)
keep_outer_v
If True, the key chains in outer_v will be kept, otherwise they will be removed.
Default is ``True``.
return_inner_v
Either 'first', 'all', or False. 'first' means the variables for the first task
inner loop will also be returned. variables for all tasks will be returned with
'all'.
Default is ``False``.
num_tasks
Number of unique tasks to inner-loop optimize for the meta step. Determined from
batch by default.
stop_gradients
Whether to stop the gradients of the cost.
Default is ``True``.
Returns
-------
ret
The cost and the gradients with respect to the outer loop variables.
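Examples
--------
With :class:`ivy.Container` input (a minimal sketch mirroring the
``maml_step`` example; printed values are omitted, since they depend on
the backend and initialisation):
>>> import ivy
>>> from ivy.functional.ivy.gradients import _variable
>>> ivy.set_backend("torch")
>>> def inner_cost_fn(sub_batch, v):
...     return sub_batch.mean().x / v.mean().latent
>>> num_tasks = 2
>>> batch = ivy.Container({"x": ivy.arange(1, num_tasks + 1, dtype="float32")})
>>> variables = ivy.Container({
...     "latent": _variable(ivy.repeat(ivy.array([[1.0]]), num_tasks, axis=0))
... })
>>> cost, grads = ivy.fomaml_step(batch, inner_cost_fn, None, variables, 5, 0.01,
...                               num_tasks=num_tasks)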
"""
if num_tasks is None:
num_tasks = batch.cont_shape[0]
rets = _train_tasks(
batch,
inner_batch_fn,
outer_batch_fn,
inner_cost_fn,
outer_cost_fn,
variables,
inner_grad_steps,
inner_learning_rate,
inner_optimization_step,
1,
average_across_steps,
batched,
inner_v,
keep_inner_v,
outer_v,
keep_outer_v,
return_inner_v,
num_tasks,
stop_gradients,
)
cost = rets[0]
if stop_gradients:
cost = ivy.stop_gradient(cost, preserve_type=False)
grads = rets[1]
if return_inner_v:
return cost, grads, rets[2]
return cost, grads
fomaml_step.computes_gradients = True
@handle_exceptions
@handle_array_function
def reptile_step(
batch: ivy.Container,
cost_fn: Callable,
variables: ivy.Container,
inner_grad_steps: int,
inner_learning_rate: float,
/,
*,
inner_optimization_step: Callable = gradient_descent_update,
batched: bool = True,
return_inner_v: Union[str, bool] = False,
num_tasks: Optional[int] = None,
stop_gradients: bool = True,
) -> Tuple[ivy.Array, ivy.Container, Any]:
"""Perform a step of Reptile.
Parameters
----------
batch
The input batch.
cost_fn
The cost function that receives the task-specific sub-batch and variables, and
returns the cost.
variables
Variables to be optimized.
inner_grad_steps
Number of gradient steps to perform during the inner loop.
inner_learning_rate
The learning rate of the inner loop.
inner_optimization_step
The function used for the inner loop optimization. It takes the learnable
weights, the derivative of the cost with respect to the weights, and the learning
rate as arguments, and returns the updated variables.
Default is `gradient_descent_update`.
batched
Whether to batch along the time dimension and run the meta steps in batch.
Default is `True`.
return_inner_v
Either `'first'`, `'all'`, or `False`. If `'first'`, the variables for the first
task inner loop will also be returned. If `'all'`, variables for all tasks will
be returned. Default is `False`.
num_tasks
Number of unique tasks to inner-loop optimize for the meta step. Determined from
the batch by default.
stop_gradients
Whether to stop the gradients of the cost. Default is `True`.
Returns
-------
ret
The cost, the gradients with respect to the outer loop variables, and additional
information from the inner loop optimization.
Examples
--------
With :class:`ivy.Container` input:
>>> from ivy.functional.ivy.gradients import gradient_descent_update
>>> import ivy
>>> from ivy.functional.ivy.gradients import _variable
>>> ivy.set_backend("torch")
>>> def inner_cost_fn(batch_in, v):
... return batch_in.mean().x / v.mean().latent
>>> num_tasks = 2
>>> batch = ivy.Container({"x": ivy.arange(1, num_tasks + 1, dtype="float32")})
>>> variables = ivy.Container({
... "latent": _variable(ivy.repeat(ivy.array([[1.0]]), num_tasks, axis=0))
... })
>>> cost, gradients = ivy.reptile_step(batch, inner_cost_fn, variables, 5, 0.01,
... num_tasks=num_tasks)
>>> print(cost)
ivy.array(1.4485182)
>>> print(gradients)
{
latent: ivy.array([-139.9569855])
}
>>> batch = ivy.Container({"x": ivy.arange(1, 4, dtype="float32")})
>>> variables = ivy.Container({
... "latent": _variable(ivy.array([1.0, 2.0]))
... })
>>> cost, gradients, firsts = ivy.reptile_step(batch, inner_cost_fn, variables, 4,
... 0.025, batched=False, num_tasks=2,
... return_inner_v='first')
>>> print(cost)
ivy.array(0.9880483)
>>> print(gradients)
{
latent: ivy.array([-13.01766968, -13.01766968])
}
>>> print(firsts)
{
latent: ivy.array([[1.02197957, 2.02197981]])
}
"""
if num_tasks is None:
num_tasks = batch.cont_shape[0]
rets = _train_tasks(
batch,
None,
None,
cost_fn,
None,
variables,
inner_grad_steps,
inner_learning_rate,
inner_optimization_step,
1,
True,
batched,
None,
True,
None,
True,
return_inner_v,
num_tasks,
stop_gradients,
)
cost = rets[0]
if stop_gradients:
cost = ivy.stop_gradient(cost, preserve_type=False)
grads = rets[1] / inner_learning_rate
if return_inner_v:
return cost, grads, rets[2]
return cost, grads
reptile_step.computes_gradients = True
# Second Order
@handle_exceptions
@handle_array_function
def maml_step(
batch: ivy.Container,
inner_cost_fn: Callable,
outer_cost_fn: Callable,
variables: ivy.Container,
inner_grad_steps: int,
inner_learning_rate: float,
/,
*,
inner_optimization_step: Callable = gradient_descent_update,
inner_batch_fn: Optional[Callable] = None,
outer_batch_fn: Optional[Callable] = None,
average_across_steps: bool = False,
batched: bool = True,
inner_v: Optional[ivy.Container] = None,
keep_inner_v: bool = True,
outer_v: Optional[ivy.Container] = None,
keep_outer_v: bool = True,
return_inner_v: Union[str, bool] = False,
num_tasks: Optional[int] = None,
stop_gradients: bool = True,
) -> Tuple[ivy.Array, ivy.Container, Any]:
"""Perform step of vanilla second order MAML.
Parameters
----------
batch
The input batch
inner_cost_fn
callable for the inner loop cost function, receiving sub-batch, inner vars and
outer vars
outer_cost_fn
callable for the outer loop cost function, receiving task-specific sub-batch,
inner vars and outer vars. If None, the cost from the inner loop will also be
optimized in the outer loop.
variables
Variables to be optimized during the meta step
inner_grad_steps
Number of gradient steps to perform during the inner loop.
inner_learning_rate
The learning rate of the inner loop.
inner_optimization_step
The function used for the inner loop optimization.
Default is ivy.gradient_descent_update.
inner_batch_fn
Function to apply to the task sub-batch, before passing to the inner_cost_fn.
Default is ``None``.
outer_batch_fn
Function to apply to the task sub-batch, before passing to the outer_cost_fn.
Default is ``None``.
average_across_steps
Whether to average the inner loop steps for the outer loop update.
Default is ``False``.
batched
Whether to batch along the time dimension, and run the meta steps in batch.
Default is ``True``.
inner_v
Nested variable keys to be optimized during the inner loop, with same keys and
boolean values. (Default value = None)
keep_inner_v
If True, the key chains in inner_v will be kept, otherwise they will be removed.
Default is ``True``.
outer_v
Nested variable keys to be optimized during the outer loop, with same keys and
boolean values. (Default value = None)
keep_outer_v
If True, the key chains in outer_v will be kept, otherwise they will be removed.
Default is ``True``.
return_inner_v
Either 'first', 'all', or False. 'first' means the variables for the first task
inner loop will also be returned. variables for all tasks will be returned with
'all'. Default is ``False``.
num_tasks
Number of unique tasks to inner-loop optimize for the meta step. Determined from
batch by default.
stop_gradients
Whether to stop the gradients of the cost. Default is ``True``.
Returns
-------
ret
The cost and the gradients with respect to the outer loop variables.
Examples
--------
With :class:`ivy.Container` input:
>>> import ivy
>>> from ivy.functional.ivy.gradients import _variable
>>> ivy.set_backend("torch")
>>> def inner_cost_fn(sub_batch, v):
... return sub_batch.mean().x / v.mean().latent
>>> def outer_cost_fn(sub_batch, v):
... return sub_batch.mean().x / v.mean().latent
>>> num_tasks = 2
>>> batch = ivy.Container({"x": ivy.arange(1, num_tasks + 1, dtype="float32")})
>>> variables = ivy.Container({
... "latent": _variable(ivy.repeat(ivy.array([[1.0]]), num_tasks, axis=0))
... })
>>> cost = ivy.maml_step(batch, inner_cost_fn, outer_cost_fn, variables, 5, 0.01)
>>> print(cost)
(ivy.array(1.40069818), {
latent: ivy.array([-1.13723135])
}, ())
"""
if num_tasks is None:
num_tasks = batch.cont_shape[0]
unique_outer = outer_v is not None
func_ret, grads = ivy.execute_with_gradients(
lambda v: _train_tasks(
batch,
inner_batch_fn,
outer_batch_fn,
inner_cost_fn,
outer_cost_fn,
variables.cont_set_at_key_chains(v) if unique_outer else v,
inner_grad_steps,
inner_learning_rate,
inner_optimization_step,
2,
average_across_steps,
batched,
inner_v,
keep_inner_v,
outer_v,
keep_outer_v,
return_inner_v,
num_tasks,
False,
),
(
variables.cont_at_key_chains(outer_v, ignore_none=True)
if keep_outer_v
else variables.cont_prune_key_chains(outer_v, ignore_none=True)
),
)
if isinstance(func_ret, tuple):
grads = grads["0"] if "0" in grads else grads
cost = func_ret[0]
rest = func_ret[1]
else:
cost = func_ret
rest = ()
if stop_gradients:
cost = ivy.stop_gradient(cost, preserve_type=False)
return cost, grads.sum(axis=0), rest
maml_step.computes_gradients = True
| ivy/ivy/functional/ivy/meta.py/0 | {
"file_path": "ivy/ivy/functional/ivy/meta.py",
"repo_id": "ivy",
"token_count": 11923
} | 48 |
"""Base class for deriving trainable modules."""
# global
from collections import OrderedDict
import os
import copy
import dill
from typing import Optional, Tuple, Dict
# local
import ivy
from ivy.data_classes.container import Container
from ivy.functional.ivy.gradients import _is_variable
from ivy.stateful.helpers import ModuleHelpers
from ivy.stateful.converters import ModuleConverters
class ModuleMeta:
def __new__(cls, *args, **kwargs):
# check the module of the class
# if it's stateful, it's internal
# we leave this untouched
if "stateful" in cls.__module__:
# we are not assigning it a variable
pass
else:
# first check if a var is already assigned
# this would mean it is a nested custom class
if not hasattr(Module, "_init_var"):
# if not, create it and add this class
Module._init_var = [cls]
else:
Module._init_var.append(cls)
instance = super().__new__(cls)
return instance
class Module(ModuleHelpers, ModuleConverters, ModuleMeta):
"""Module is a base class for deriving trainable modules."""
def __init__(
self,
/,
*args,
v=None,
buffers=None,
build_mode="on_init",
store_vars=True,
with_partial_v=False,
dynamic_backend=None,
training=True,
dtype=None,
device=None,
**kwargs,
):
"""Initialize Ivy layer, which is a stateful object consisting of
trainable variables.
Parameters
----------
args
Positional arguments to the _build method.
v
Ivy container of trainable variables. Created internally by default.
buffers
Ivy container of buffers/non-trainable arrays in the state_dict.
build_mode
How the Module is built, either on initialization (now),
explicitly by the user by calling build(), or the first
time the __call__ method is run. Default is on initialization.
store_vars
Whether or not to store the variables created. Default is ``True``.
with_partial_v
Whether to allow partial specification of variables. Default is ``False``.
dynamic_backend
When True, allow conversion of arrays to the current backend if the
container passed via `v` contains arrays created with a different
backend.
training
specifies whether the module is in training or evaluation mode. Default is
``True``.
dtype
Data type to be used for creating model variables. (Default value = None).
device
Device on which to create the module's variables 'cuda:0', 'cuda:1', 'cpu'
etc. (Default value = None).
kwargs
Keyword arguments to the _build method.
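Examples
--------
A minimal sketch of a custom module (assuming a backend has been set):
>>> class MyModel(ivy.Module):
...     def __init__(self):
...         self.linear0 = ivy.Linear(3, 64)
...         self.linear1 = ivy.Linear(64, 1)
...         ivy.Module.__init__(self)
...     def _forward(self, x):
...         x = ivy.relu(self.linear0(x))
...         return ivy.sigmoid(self.linear1(x))
>>> model = MyModel()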
"""
valid_build_modes = ["on_init", "explicit", "on_call"]
ivy.utils.assertions.check_elem_in_list(build_mode, valid_build_modes)
self._build_mode = build_mode
self._with_partial_v = with_partial_v
self._store_vars = store_vars
self._built = False
self._v_from_constructor = (
v if isinstance(v, Container) or v is None else Container(v)
)
self._v = v if v is not None else Container()
self._buffers = Container(ivy.default(buffers, {}))
self._module_dict = Container()
self._args = args
self._kwargs = kwargs
self._module_graph = None
self._target = None
self._lazy_traced = False
self._training = training
self._dynamic_backend = dynamic_backend
self._device = ivy.default(device, ivy.default_device())
self._dtype = ivy.default(dtype, ivy.default_dtype())
if build_mode != "on_init":
return
if hasattr(Module, "_init_var"):
if "stateful" in self.__module__:
# we know we are operating within the
# context of another class, and it's a
# stateful class internally defined
# so we freeze weight generation
# unless `v` or `with_partial_v` is passed
if v or with_partial_v:
# build only if `v` or `with_partial_v`
self.build(*args, dynamic_backend=dynamic_backend, **kwargs)
# we don't want to delete the class variable now
# since there could be other child modules
return
# we know this is the custom class that has triggered the
# class var, so we do the building, and after that delete
# the class variable, but before that we check if it's a
# nested scenario, because if it's another custom class initialised
# within another one, then we have to hold variable initialisation
# here too, unless `v` or `with_partial_v`
if len(Module._init_var) > 1 and not v and not with_partial_v:
# hold off initialisation, delete key for this class and
# move on
Module._init_var.pop()
return
self.build(*args, dynamic_backend=dynamic_backend, **kwargs)
if Module._init_var[-1] == self.__class__:
# delete it only if this is the class that caused its creation
Module._init_var.pop()
# do a final check: if _init_var has become empty, delete it altogether
if not Module._init_var:
del Module._init_var
return
self.build(*args, dynamic_backend=dynamic_backend, **kwargs)
# Public Methods #
# ---------------#
def build(
self,
*args,
from_call=False,
device=None,
dtype=None,
dynamic_backend=None,
**kwargs,
):
"""Build the internal layers and variables for this module.
Parameters
----------
args
Positional arguments to the _build method.
from_call
If True, denote that this build is triggered by calling. Otherwise,
triggered by initializing the module. Default is ``False``.
device
The device we want to build module on. None for default device.
Default is ``None``.
dtype
The data type for building the module. Default is ``None``.
dynamic_backend
Whether to use dynamic backend setting to deal if variables are passed as
input and created with a different backend to the current backend.
kwargs
Keyword arguments to the _build method.
Returns
-------
ret
True for successfully built a module.
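Examples
--------
A sketch with explicit build mode (assuming ``MyModel`` is a custom
``ivy.Module`` subclass that forwards keyword arguments to
``ivy.Module.__init__``):
>>> model = MyModel(build_mode="explicit")
>>> model.built
False
>>> _ = model.build()
>>> model.built
True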
"""
self._device = ivy.default(device, self._device)
self._dtype = ivy.default(dtype, self._dtype)
self._dynamic_backend = ivy.default(dynamic_backend, self._dynamic_backend)
# return False if not from_call but build_mode is on_call
if not from_call and self._build_mode == "on_call":
return self.v
# dtype is not forwarded to the user-defined build, since doing so
# raises an error in `from_haiku_module` with haiku's forward method,
# hence the line below is left commented out
# kwargs["dtype"] = dtype
# build local Module, and any child modules flagged with "explicit" build mode
# this gets the child modules initialised at best, their weights
# remain un-generated
built = ivy.default(self._build(*args, **kwargs), True)
# this creates weights for this Module only
created = Container(
self._create_variables(device=self._device, dtype=dtype),
dynamic_backend=self._dynamic_backend,
)
# build variables based on locally built layers, if v not passed in constructor
created_n_found = Container(
dict(
**self._find_variables(
obj=self,
without_initialisation=bool(
self._v_from_constructor and not self._with_partial_v
),
),
**created,
),
dynamic_backend=self._dynamic_backend,
)
created_n_found.cont_config["build_callable"] = True
if ivy.exists(self._v_from_constructor):
if self._with_partial_v:
if self._v_from_constructor:
created_n_found.cont_assert_contains_sub_structure(
self._v_from_constructor, partial=True
)
self._v = created_n_found.cont_set_at_key_chains(
self._v_from_constructor
)
else:
created_n_found, _ = self._remove_duplicate_variables(
created_n_found, created
)
ivy.Container.cont_assert_identical_structure(
[created_n_found, self._v_from_constructor],
assert_and_assign=True,
)
self._v = created_n_found
else:
self._v = created_n_found
# remove duplicates
self._v, keychain_mappings = self._remove_duplicate_variables(self._v, created)
# build any child 'on_call' layers
if not built and from_call:
# update child modules to share the same device
for v in self.__dict__.values():
if isinstance(v, ivy.Module):
v._device = self._device
# build during forward pass
self._forward(*args, **kwargs)
# re-build variables based on additional child on-call layers, if v not
# passed in constructor
if not ivy.exists(self._v_from_constructor):
created_n_found = Container(
dict(
**self._find_variables(obj=self),
**self._create_variables(device=self._device, dtype=dtype),
),
dynamic_backend=self._dynamic_backend,
)
self._v = created_n_found
# remove further duplicates with self.v
self._v, keychain_mappings = self._remove_duplicate_variables(
self._v, created
)
# set built flag
built = True
# wrap call methods if the module is fully built
if built:
self._wrap_call_methods(keychain_mappings, obj=self)
# flag built and remove local variables if specified
self._built = bool(built)
v_ret = self.v
if not self._store_vars:
# ToDo: verify variables in self.v are released once this method exits
self._v = ivy.Container()
# compute the module dict
self._compute_module_dict()
# once all variables built, find and assign buffers
self._find_buffers()
return v_ret if bool(v_ret) or isinstance(built, bool) else built
def trace_graph(
self,
args: Optional[Tuple] = None,
kwargs: Optional[Dict] = None,
**trace_kwargs,
):
"""Trace the `ivy.Module`'s `_unified_ivy_graph` or `_call` method to
the target backend.
Parameters
----------
args:
arguments used to trace. Defaults to None.
kwargs:
keyword arguments used to trace. Defaults to None.
trace_kwargs:
keyword arguments passed to the trace function.
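Examples
--------
A sketch of lazy vs. eager tracing (``model`` and ``x`` are assumed to be
a built module and a valid input for it):
>>> model.trace_graph()  # no arguments: tracing is delayed to the first call
>>> model.trace_graph(args=(x,))  # traces the call method eagerly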
"""
# no arguments given to trace, so delay the compilation
if not (args or kwargs):
self._lazy_traced = True
return
# we do not need to convert the args to source
args = ivy.default(args, ())
kwargs = ivy.default(kwargs, {})
# shallow copy the kwargs dict
kwargs = copy.copy(kwargs)
kwargs["v"] = self.v
fn_to_trace = ivy.default(self._module_graph, self._call)
self._module_graph = ivy.trace_graph(
fn_to_trace, **trace_kwargs, args=args, kwargs=kwargs
)
self._lazy_traced = False
def register_buffer(self, name, value):
"""Register a buffer.
Parameters
----------
name
Name of the buffer
value
Value of the buffer
"""
if value is not None:
self._buffers.update({name: value})
else:
super().__setattr__(name, value)
def register_parameter(self, name, value):
"""Register a parameter.
Parameters
----------
name
Name of the parameter
value
Value of the parameter
"""
self._v.update({name: value})
def train(self, mode: bool = True):
"""Enable or disable training mode."""
self._training = mode
for module in self.v:
module = getattr(self, module, None)
if isinstance(module, ivy.Module):
module.train(mode=mode)
return self
def eval(self):
"""Disable training mode."""
return self.train(mode=False)
def to_device(self, device):
"""Move the weights and buffers to the specified device."""
self._device = ivy.default(device, self._device)
for obj in self.state_dict.values():
if isinstance(obj, ivy.Module):
obj.to_device(device)
elif ivy.is_array(obj) or ivy.is_ivy_container(obj):
ivy.to_device(obj, device, out=obj)
return self
def show_graph(
self,
randomness_factor: float = 0.1,
save_to_disk: bool = False,
notebook: bool = False,
with_edge_labels: bool = True,
with_arg_labels: bool = True,
with_output_labels: bool = True,
output_connected_only: bool = True,
highlight_subgraph: Optional[int] = None,
fname: Optional[str] = None,
):
if not ivy.exists(self._module_graph):
raise ValueError("You must trace the module to display the graph.")
return self._module_graph.show(
save_to_disk=save_to_disk,
notebook=notebook,
with_edge_labels=with_edge_labels,
with_arg_labels=with_arg_labels,
with_output_labels=with_output_labels,
output_connected_only=output_connected_only,
randomness_factor=randomness_factor,
highlight_subgraph=highlight_subgraph,
fname=fname,
)
def save_weights(self, weights_path, /):
"""Save the weights on the Module.
Parameters
----------
weights_path
The hdf5 file for saving the weights.
Returns
-------
None
"""
os.makedirs("/".join(weights_path.split("/")[:-1]), exist_ok=True)
self.v.cont_to_disk_as_hdf5(weights_path)
def save(self, filename):
"""Save the module object to disk using pickle.
Parameters
----------
filename : str
The name of the file to save the module object to.
"""
if ivy.current_backend_str() == "paddle":
self._convert_tensors_to_numpy()
with open(filename, "wb") as f:
dill.dump(self, f)
if ivy.current_backend_str() == "paddle":
self._convert_numpy_to_tensors()
@staticmethod
def load(filename):
"""Load a module object from disk using pickle.
Parameters
----------
filename : str
The name of the file to load the module object from.
Returns
-------
Module
The loaded module object.
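Examples
--------
A minimal sketch, assuming a module was previously saved with ``save``
(the path below is hypothetical):
>>> model = ivy.Module.load("model.pkl")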
"""
with open(filename, "rb") as f:
loaded = dill.load(f)
if ivy.current_backend_str() == "paddle":
loaded._convert_numpy_to_tensors()
return loaded
# Dunder Methods #
# ---------------#
def __call__(
self,
*args,
v=None,
buffers=None,
**kwargs,
):
"""Forward an input through current module.
Parameters
----------
args
Positional args to the build method.
v
If given, use this container as internal variables temporarily.
Default is ``None``.
buffers
If given, use this container as internal buffers temporarily.
Default is ``None``.
kwargs
Keyword arguments to the build method.
Returns
-------
ret
"""
if self._lazy_traced:
# we are creating graph since we want to transpile module,
# so set the appropriate backend
if self._target:
ivy.set_backend(self._target)
self.trace_graph(args=args, kwargs=kwargs)
if self._target:
ivy.previous_backend()
if self._module_graph:
# we need `v` in kwargs, since this is a traced call
v = v if v else self.v
return self._module_graph(*args, v=v, **kwargs)
# convert variables to native arrays so that they can be tracked
v = ivy.to_native(v)
ret = self._call(*args, v=v, buffers=buffers, **kwargs)
return ret
def __getattribute__(self, name):
if name == "v":
if not super().__getattribute__("_v") and not self.built:
self._build_and_return_v(
*self._args, dynamic_backend=self._dynamic_backend, **self._kwargs
)
return super().__getattribute__(name)
def __setattr__(self, name, value):
if name in ["v", "buffers"]:
name = "_" + name
if isinstance(value, Module):
ret = super().__setattr__(name, value)
if (
hasattr(self, "_build_mode")
and self.build_mode == "on_init"
and self.built
):
self._rebuild()
return ret
return super().__setattr__(name, value)
def __delattr__(self, name):
if hasattr(self, name):
if isinstance(getattr(self, name), Module):
super().__delattr__(name)
if self.build_mode == "on_init":
self._rebuild()
return
super().__delattr__(name)
def __repr__(self):
extra_lines = []
extra_repr = self._extra_repr()
if extra_repr:
extra_lines = extra_repr.split("\n")
child_lines = []
for key in self.v.keys():
if isinstance(getattr(self, key, None), Module):
mod_str = repr(getattr(self, key))
mod_str = self._addindent(mod_str, 2)
child_lines.append(f"({key}): {mod_str}")
lines = extra_lines + child_lines
main_str = f"{self.__class__.__name__}("
if lines:
# simple one-liner info, which most builtin Modules will use
if len(extra_lines) == 1 and not child_lines:
main_str += extra_lines[0]
else:
main_str += "\n " + "\n ".join(lines) + "\n"
main_str += ")"
return main_str
# Methods to be Optionally Overridden #
# -----------------------------------#
def _create_variables(self, *, device=None, dtype=None):
"""Create internal trainable variables, and return as arbitrary nested
dict. Overridable.
Parameters
----------
device
The device string, specifying the device on which to create the variables.
dtype
The dtype string, specifying the dtype on which to create the variables.
Returns
-------
ret
An empty dict by default; overriding implementations return the created
variables as a nested dict.
"""
return {}
def _build(self, *args, **kwargs) -> bool:
"""Build the internal layers and variables for this module.
Overridable.
Returns
-------
ret
False or empty Container if the build only partially completed (i.e. some
child Modules have "on_call" build mode). Alternatively, return True or a
container of the built variables if the module is built.
"""
return True
def _forward(self, *args, **kwargs):
"""Forward pass of the layer, called after handling the optional input
variables.
Raises
------
NotImplementedError
"""
raise ivy.utils.exceptions.IvyNotImplementedException
def _extra_repr(self) -> str:
"""Set the extra representation of the module.
To print customized extra information, you should re-implement
this method in your own modules. Both single-line and multi-line
strings are acceptable.
"""
return ""
# Properties #
# -----------#
@property
def device(self):
return self._device
@property
def dtype(self):
return self._dtype
@property
def build_mode(self):
return self._build_mode
@property
def built(self):
return self._built
@property
def training(self):
return self._training
@property
def v(self):
return self._v
@property
def buffers(self):
return self._buffers
@property
def state_dict(self):
"""Return the state_dict which is a collection of the variables and
buffers."""
return {**self.v, **self.buffers}
@property
def module_dict(self):
return self._module_dict
class _HaikuIvyModule(Module):
def __init__(self, *args, params_hk, native_module, device, devices, **kwargs):
self._native_module = native_module
self._args = args
self._kwargs = kwargs
ivy.Module.__init__(
self,
params_hk,
*args,
build_mode="on_init",
device=device,
devices=devices,
**kwargs,
)
def _create_variables(self, device, dtype):
return self._hk_params
def _build(self, params_hk, *args, **kwargs):
args, kwargs = ivy.args_to_native(*args, **kwargs)
# noinspection PyUnresolvedReferences
params_dict = self._hk_flat_map_to_dict(params_hk)
self._hk_params = ivy.Container(params_dict, dynamic_backend=False)
param_iterator = self._hk_params.cont_to_iterator()
_, param0 = next(param_iterator, ["_", 0])
if hasattr(param0, "device"):
self._device = ivy.as_ivy_dev(param0.device())
else:
self._device = ivy.as_ivy_dev("cpu")
def _forward(self, *a, **kw):
a, kw = ivy.args_to_native(*a, **kw)
params_hk = self._dict_to_hk_flat_map(self.v.cont_to_dict())
ret = self._native_module.apply(params_hk, 0, *a, **kw)
nested = isinstance(ret, tuple)
return ivy.to_native(ret, nested=nested)
def _hk_flat_map_to_dict(self, hk_flat_map):
from haiku._src.data_structures import FlatMapping
ret_dict = {}
for k, v in hk_flat_map.items():
new_k = k.replace("/", "|")
if isinstance(v, FlatMapping):
ret_dict[new_k] = self._hk_flat_map_to_dict(v)
else:
ret_dict[new_k] = v
return ret_dict
def _dict_to_hk_flat_map(self, dict_in):
from haiku._src.data_structures import FlatMapping
ret_flat_map = {}
for k, v in dict_in.items():
new_k = k.replace("|", "/")
if isinstance(v, dict):
ret_flat_map[new_k] = self._dict_to_hk_flat_map(v)
else:
ret_flat_map[new_k] = v
return FlatMapping(ret_flat_map)
class _FlaxIvyModule(Module):
def __init__(self, *args, params_fx, native_module, device, devices, **kwargs):
self._native_module = native_module
self._args = args
self._kwargs = kwargs
ivy.Module.__init__(
self,
params_fx,
*args,
build_mode="on_init",
device=device,
devices=devices,
**kwargs,
)
def _create_variables(self, device, dtype):
return self._fx_params
def _build(self, params_fx, *args, **kwargs):
import flax
args, kwargs = ivy.args_to_native(*args, **kwargs)
# noinspection PyUnresolvedReferences
params_dict = flax.core.unfreeze(params_fx)
self._fx_params = ivy.Container(params_dict, dynamic_backend=False)
param_iterator = self._fx_params.cont_to_iterator()
_, param0 = next(param_iterator, ["_", 0])
self._device = ivy.as_ivy_dev(ivy.dev(param0))
def _forward(self, *a, **kw):
import flax
a, kw = ivy.args_to_native(*a, **kw)
params_fx = flax.core.freeze(self.v.cont_to_dict())
ret = self._native_module.apply(params_fx, *a, **kw)
nested = isinstance(ret, tuple)
return ivy.to_native(ret, nested=nested)
class _KerasIvyModule(Module):
def __init__(self, *args, native_module, device, devices, **kwargs):
self._native_module = native_module
self._args = args
self._kwargs = kwargs
ivy.Module.__init__(self, *args, device=device, devices=devices, **kwargs)
def _create_variables(self, device=None, dtype=None):
return self._native_params
def _build(self, *args, **kwargs):
self._native_params = ivy.Container(
OrderedDict(
sorted([(param.name, param) for param in self._native_module.variables])
),
dynamic_backend=False,
)
def _forward(self, *a, **kw):
a, kw = ivy.args_to_native(*a, **kw)
ret = self._native_module(*a, **kw)
nested = isinstance(ret, tuple)
return ivy.to_native(ret, nested=nested)
class _PaddleIvyModule(Module):
def __init__(self, *args, native_module, device, devices, **kwargs):
self._native_module = native_module
self._args = args
self._kwargs = kwargs
ivy.Module.__init__(self, *args, device=device, devices=devices, **kwargs)
def _create_variables(self, device=None, dtype=None):
return self._native_params
def _build(self, *args, **kwargs):
self._native_params = ivy.Container(
OrderedDict(
sorted(
[
(k.replace(".", "/"), v)
for k, v in dict(self._native_module.named_parameters()).items()
]
)
),
dynamic_backend=False,
)
def _forward(self, *a, **kw):
a, kw = ivy.args_to_native(*a, **kw)
ret = self._native_module(*a, **kw)
nested = isinstance(ret, tuple)
return ivy.to_native(ret, nested=nested)
class _TorchIvyModule(Module):
def __init__(self, *args, native_module, device, devices, inplace_update, **kwargs):
self._native_module = native_module
self._args = args
self._kwargs = kwargs
self._update_v = (
self._inplace_update_v if inplace_update else self._replace_update_v
)
ivy.Module.__init__(self, *args, device=device, devices=devices, **kwargs)
def _create_variables(self, device=None, dtype=None):
return self._native_params
def _build(self, *args, **kwargs):
self._native_params = ivy.Container(
OrderedDict(
sorted(
[
(k.replace(".", "/"), v)
for k, v in dict(self._native_module.named_parameters()).items()
]
)
),
dynamic_backend=False,
)
@staticmethod
def _inplace_update(p, v):
p.data = v.data
def _inplace_update_v(self, new_v):
ivy.Container.cont_multi_map(
lambda xs, kc: self._inplace_update(xs[0], xs[1]),
[self._native_params, new_v],
)
def _replace_update_v(self, new_v, native=None):
import torch
native = ivy.default(native, self._native_module)
for k, v in new_v.items():
if isinstance(v, ivy.Container):
# noinspection PyProtectedMember
native._modules[k] = self._replace_update_v(v, native._modules[k])
elif _is_variable(v):
# noinspection PyProtectedMember
native.__setattr__(k, v)
elif isinstance(v, torch.Tensor):
# noinspection PyProtectedMember
native.__setattr__(
k, torch.nn.Parameter(v, requires_grad=v.requires_grad)
)
else:
raise ivy.utils.exceptions.IvyException(
f"found item in variable container {v} which was neither a sub"
" ivy.Container nor a variable."
)
return native
def _forward(self, *a, **kw):
a, kw = ivy.args_to_native(*a, **kw)
self._update_v(self.v)
ret = self._native_module(*a, **kw)
nested = isinstance(ret, tuple)
return ivy.to_native(ret, nested=nested)
| ivy/ivy/stateful/module.py/0 | {
"file_path": "ivy/ivy/stateful/module.py",
"repo_id": "ivy",
"token_count": 14058
} | 49 |
# global
from typing import get_type_hints
# local
import ivy
def _is_optional(typ):
# noinspection PyBroadException
try:
rep = typ.__repr__().split(".")[1]
if rep.startswith("Optional") or (
rep.startswith("Union") and type(None) in typ.__args__
):
return True
except BaseException as error:
print(f"Exception occurred: {error}")
return False
def _is_union(typ):
# noinspection PyBroadException
try:
rep = typ.__repr__().split(".")[1]
if rep.startswith("Union"):
return True
except BaseException as error:
print(f"Exception occurred: {error}")
return False
def _is_dict(typ):
# noinspection PyBroadException
try:
rep = typ.__repr__().split(".")[1]
if rep.startswith("Dict"):
return True
except BaseException as error:
print(f"Exception occurred: {error}")
return False
def _is_iterable(typ):
# noinspection PyBroadException
try:
rep = typ.__repr__().split(".")[1]
if rep.startswith("List") or rep.startswith("Tuple"):
return True
except BaseException as error:
print(f"Exception occurred: {error}")
return False
def _correct_index(is_opt, is_dict, is_iter):
if is_opt:
return ["optional"]
elif is_dict:
return [str]
elif is_iter:
return [int]
return []
def _get_array_idxs(typ, idx_so_far=None):
idx_so_far = ivy.default(idx_so_far, [])
these_idxs = []
if not hasattr(typ, "__args__"):
return these_idxs
is_opt = _is_optional(typ)
is_union = _is_union(typ)
is_dict = _is_dict(typ)
is_iter = _is_iterable(typ)
for a in typ.__args__:
a_repr = repr(a)
if (
"[" not in a_repr
and "]" not in a_repr
and "ivy." in a_repr
and (".Array" in a_repr or ".NativeArray" in a_repr)
):
these_idxs.append(idx_so_far + _correct_index(is_opt, is_dict, is_iter))
if is_union:
break
else:
these_idxs += _get_array_idxs(
a, idx_so_far + _correct_index(is_opt, is_dict, is_iter)
)
return these_idxs
def fn_array_spec(fn):
"""Return a specification of the function, indicating all arguments which
include arrays, and the indexes of these.
Parameters
----------
fn
function to inspect
Returns
-------
ret
specification
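Examples
--------
A minimal sketch; each entry of the returned list is an index chain
leading to an array-typed argument (the exact chains depend on the
type hints of ``fn``):
>>> spec = fn_array_spec(ivy.add)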
"""
try: # get_type_hints can raise an error on Python 3.8.0 in certain cases
type_hints = get_type_hints(fn)
except Exception:
type_hints = {}
array_idxs = []
for i, (k, v) in enumerate(type_hints.items()):
a_idxs = _get_array_idxs(v)
if not a_idxs:
continue
a_idxs = [[(i, k)] + a for a in a_idxs]
array_idxs += a_idxs
return array_idxs
def add_array_specs():
for k, v in ivy.__dict__.items():
if callable(v) and k[0].islower():
v.array_spec = fn_array_spec(v)
| ivy/ivy/utils/inspection.py/0 | {
"file_path": "ivy/ivy/utils/inspection.py",
"repo_id": "ivy",
"token_count": 1483
} | 50 |
# Hypothesis strategies
from . import hypothesis_helpers
from .hypothesis_helpers import *
# Testing
from . import assertions
from .assertions import *
from . import function_testing
from .function_testing import *
from . import testing_helpers
from .testing_helpers import *
| ivy/ivy_tests/test_ivy/helpers/__init__.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/helpers/__init__.py",
"repo_id": "ivy",
"token_count": 73
} | 51 |
from abc import ABC, abstractproperty, abstractmethod
from dataclasses import dataclass
from typing import List
import ivy
@dataclass
class SupportedDevices:
valid_devices: List[str]
invalid_devices: List[str]
# TODO: can be refactored and constructed dynamically
@dataclass
class SupportedDtypes:
valid_dtypes: List[str]
invalid_dtypes: List[str]
valid_numeric_dtypes: List[str]
invalid_numeric_dtypes: List[str]
valid_int_dtypes: List[str]
invalid_int_dtypes: List[str]
valid_uint_dtypes: List[str]
invalid_uint_dtypes: List[str]
valid_float_dtypes: List[str]
invalid_float_dtypes: List[str]
valid_complex_dtypes: List[str]
invalid_complex_dtypes: List[str]
class FrontendConfig(ABC):
@abstractproperty
def supported_dtypes(self) -> SupportedDtypes:
pass
@abstractproperty
def supported_devices(self) -> SupportedDevices:
pass
@abstractproperty
def Dtype(self):
pass
@abstractproperty
def Device(self):
pass
@abstractmethod
def native_array(self, x):
pass
@abstractmethod
def is_native_array(self, x):
pass
@abstractmethod
def to_numpy(self, x):
pass
@abstractmethod
def as_native_dtype(self, dtype: str):
pass
@abstractmethod
def as_native_device(self, device: str):
pass
@abstractmethod
def isscalar(self, x):
pass
class FrontendConfigWithBackend(FrontendConfig):
backend_str = None
def __init__(self):
self.backend = ivy.with_backend(self.backend_str)
@property
def Dtype(self):
return self.backend.Dtype
@property
def Device(self):
return self.backend.Device
@property
def supported_devices(self):
return SupportedDevices(
valid_devices=self.backend.valid_devices,
invalid_devices=self.backend.invalid_devices,
)
@property
def supported_dtypes(self):
return SupportedDtypes(
valid_dtypes=self.backend.valid_dtypes,
invalid_dtypes=self.backend.invalid_dtypes,
valid_numeric_dtypes=self.backend.valid_numeric_dtypes,
invalid_numeric_dtypes=self.backend.invalid_numeric_dtypes,
valid_int_dtypes=self.backend.valid_int_dtypes,
invalid_int_dtypes=self.backend.invalid_int_dtypes,
valid_uint_dtypes=self.backend.valid_uint_dtypes,
invalid_uint_dtypes=self.backend.invalid_uint_dtypes,
valid_float_dtypes=self.backend.valid_float_dtypes,
invalid_float_dtypes=self.backend.invalid_float_dtypes,
valid_complex_dtypes=self.backend.valid_complex_dtypes,
invalid_complex_dtypes=self.backend.invalid_complex_dtypes,
)
def native_array(self, x):
return self.backend.native_array(x)
def is_native_array(self, x):
return self.backend.is_native_array(x)
def to_numpy(self, x):
return self.backend.to_numpy(x)
def as_native_dtype(self, dtype: str):
return self.backend.as_native_dtype(dtype)
def as_native_device(self, device: str):
return self.backend.as_native_dev(device)
def isscalar(self, x):
return self.backend.isscalar(x)
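# A concrete config is obtained by subclassing `FrontendConfigWithBackend`
# and setting `backend_str`. A minimal sketch (assuming the "torch" backend
# is installed):
#
# class TorchFrontendConfig(FrontendConfigWithBackend):
#     backend_str = "torch"
#
# def get_config():
#     return TorchFrontendConfig()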
| ivy/ivy_tests/test_ivy/test_frontends/config/base.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/config/base.py",
"repo_id": "ivy",
"token_count": 1442
} | 52 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# fft
@handle_frontend_test(
fn_tree="jax.numpy.fft.fft",
dtype_values_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("complex"),
num_arrays=1,
min_value=-1e5,
max_value=1e5,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
allow_inf=False,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
valid_axis=True,
force_int_axis=True,
),
n=st.integers(min_value=2, max_value=10),
norm=st.sampled_from(["backward", "ortho", "forward", None]),
)
def test_jax_numpy_fft(
dtype_values_axis, n, norm, frontend, backend_fw, test_flags, fn_tree, on_device
):
dtype, values, axis = dtype_values_axis
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=values[0],
n=n,
axis=axis,
norm=norm,
atol=1e-02,
rtol=1e-02,
)
# fft2
@handle_frontend_test(
fn_tree="jax.numpy.fft.fft2",
dtype_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("complex"),
num_arrays=1,
min_value=-1e5,
max_value=1e5,
min_num_dims=2,
max_num_dims=5,
min_dim_size=2,
max_dim_size=5,
allow_inf=False,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
),
axes=st.sampled_from([(0, 1), (-1, -2), (1, 0)]),
s=st.tuples(
st.integers(min_value=2, max_value=256), st.integers(min_value=2, max_value=256)
),
norm=st.sampled_from(["backward", "ortho", "forward", None]),
)
def test_jax_numpy_fft2(
dtype_values,
s,
axes,
norm,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
dtype, values = dtype_values
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=values[0],
s=s,
axes=axes,
norm=norm,
atol=1e-02,
rtol=1e-02,
)
# fftfreq
@handle_frontend_test(
fn_tree="jax.numpy.fft.fftfreq",
n=st.integers(min_value=10, max_value=100),
sample_rate=st.integers(min_value=1, max_value=10),
dtype=st.one_of(helpers.get_dtypes("float", full=False), st.none()),
)
def test_jax_numpy_fftfreq(
n, sample_rate, dtype, backend_fw, frontend, test_flags, fn_tree, on_device
):
d = 1 / sample_rate
dtype = dtype[0] if dtype is not None else None
helpers.test_frontend_function(
input_dtypes=[int],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=True,
n=n,
d=d,
dtype=dtype,
)
# fftshift
@handle_frontend_test(
fn_tree="jax.numpy.fft.fftshift",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), shape=(4,), array_api_dtypes=True
),
)
def test_jax_numpy_fftshift(
dtype_and_x, backend_fw, frontend, test_flags, fn_tree, on_device
):
input_dtype, arr = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=True,
x=arr[0],
axes=None,
)
# ifft
@handle_frontend_test(
fn_tree="jax.numpy.fft.ifft",
dtype_values_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("complex"),
num_arrays=1,
min_value=-1e5,
max_value=1e5,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
allow_inf=False,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
valid_axis=True,
force_int_axis=True,
),
n=st.integers(min_value=2, max_value=10),
norm=st.sampled_from(["backward", "ortho", "forward", None]),
)
def test_jax_numpy_ifft(
dtype_values_axis, n, norm, frontend, backend_fw, test_flags, fn_tree, on_device
):
dtype, values, axis = dtype_values_axis
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=values[0],
n=n,
axis=axis,
norm=norm,
atol=1e-02,
rtol=1e-02,
)
# ifft2
@handle_frontend_test(
fn_tree="jax.numpy.fft.ifft2",
dtype_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=1,
min_value=-1e5,
max_value=1e5,
min_num_dims=2,
max_num_dims=5,
min_dim_size=2,
max_dim_size=5,
allow_inf=False,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
),
axes=st.sampled_from([(0, 1), (-1, -2), (1, 0)]),
s=st.tuples(
st.integers(min_value=2, max_value=256), st.integers(min_value=2, max_value=256)
),
norm=st.sampled_from(["backward", "ortho", "forward", None]),
)
def test_jax_numpy_ifft2(
dtype_values,
s,
axes,
norm,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
dtype, values = dtype_values
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=values[0],
s=s,
axes=axes,
norm=norm,
atol=1e-02,
rtol=1e-02,
)
# rfft
@handle_frontend_test(
fn_tree="jax.numpy.fft.rfft",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
min_value=-1e5,
max_value=1e5,
min_num_dims=1,
min_dim_size=2,
allow_inf=False,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
valid_axis=True,
force_int_axis=True,
),
n=st.one_of(
st.integers(min_value=2, max_value=10),
st.just(None),
),
norm=st.sampled_from(["backward", "ortho", "forward", None]),
)
def test_jax_numpy_rfft(
dtype_input_axis, n, norm, frontend, backend_fw, test_flags, fn_tree, on_device
):
input_dtype, x, axis = dtype_input_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
n=n,
axis=axis,
norm=norm,
atol=1e-04,
rtol=1e-04,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_fft.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_fft.py",
"repo_id": "ivy",
"token_count": 3924
} | 53 |
import numpy
from ivy_tests.test_ivy.test_frontends import NativeClass
numpy_classes_to_ivy_classes = {numpy._NoValue: None}
def convnumpy(argument):
"""Convert NativeClass in argument to ivy frontend counterpart for
numpy."""
if isinstance(argument, NativeClass):
return numpy_classes_to_ivy_classes.get(argument._native_class)
return argument
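# Example (illustrative): a ``NativeClass`` wrapping ``numpy._NoValue`` maps to
# ``None`` via the table above; any non-``NativeClass`` argument is returned
# unchanged.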
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/__init__.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/__init__.py",
"repo_id": "ivy",
"token_count": 129
} | 54 |
# global
import numpy as np
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
# resize
@st.composite
def dtype_and_resize(draw):
dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=2,
max_dim_size=10,
),
)
)
new_shape = draw(
helpers.get_shape(
allow_none=False,
min_num_dims=2,
max_num_dims=5,
min_dim_size=2,
max_dim_size=10,
),
)
return dtype, x, new_shape
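# Note: unlike ``reshape``, ``np.resize`` may change the total element count,
# so ``new_shape`` above is drawn independently of the shape of ``x``.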
@st.composite
def dtypes_x_reshape(draw):
dtypes, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
)
)
shape = draw(helpers.reshape_shapes(shape=np.array(x).shape))
return dtypes, x, shape
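# Example draw (illustrative): for an input of shape (2, 3), ``reshape_shapes``
# yields a size-compatible target such as (3, 2) or (6,), so ``np.reshape``
# never fails on element count.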
# asanyarray
@handle_frontend_test(
fn_tree="numpy.asanyarray",
dtype_and_a=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_numpy_asanyarray(
*,
dtype_and_a,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, a = dtype_and_a
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=a[0],
)
# asarray_chkfinite
@handle_frontend_test(
fn_tree="numpy.asarray_chkfinite",
dtype_and_a=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_numpy_asarray_chkfinite(
*,
dtype_and_a,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, a = dtype_and_a
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=a[0],
)
# asfarray
@handle_frontend_test(
fn_tree="numpy.asfarray",
dtype_and_a=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
)
def test_numpy_asfarray(
*,
dtype_and_a,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, a = dtype_and_a
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=a[0],
)
@handle_frontend_test(
fn_tree="numpy.broadcast_to",
dtype_x_shape=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), ret_shape=True
),
factor=helpers.ints(min_value=1, max_value=5),
test_with_out=st.just(False),
)
def test_numpy_broadcast_to(
*,
dtype_x_shape,
factor,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x, shape = dtype_x_shape
broadcast_shape = (factor,) + shape
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
array=x[0],
shape=broadcast_shape,
)
# moveaxis
@handle_frontend_test(
fn_tree="numpy.moveaxis",
dtype_and_a=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-100,
max_value=100,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
key="a_s_d",
),
),
source=helpers.get_axis(
allow_none=False,
unique=True,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
key="a_s_d",
),
min_size=1,
force_int=True,
),
destination=helpers.get_axis(
allow_none=False,
unique=True,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
key="a_s_d",
),
min_size=1,
force_int=True,
),
test_with_out=st.just(False),
)
def test_numpy_moveaxis(
*,
dtype_and_a,
source,
destination,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, a = dtype_and_a
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=a[0],
source=source,
destination=destination,
)
@handle_frontend_test(
fn_tree="numpy.ravel",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
order=st.sampled_from(["C", "F", "A", "K"]),
test_with_out=st.just(False),
)
def test_numpy_ravel(
*,
dtype_and_x,
order,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
order=order,
)
# require
@handle_frontend_test(
fn_tree="numpy.require",
dtype_and_a=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
requirements=st.sampled_from(["C", "F", "A", "O", "W", "E"]),
like=st.just(None),
test_with_out=st.just(False),
)
def test_numpy_require(
*,
dtype_and_a,
requirements,
like,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
dtype, a = dtype_and_a
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=a[0],
dtype=np.dtype(dtype[0]),
requirements=requirements,
like=like,
)
# reshape
@handle_frontend_test(
fn_tree="numpy.reshape",
dtypes_x_shape=dtypes_x_reshape(),
order=st.sampled_from(["C", "F", "A"]),
)
def test_numpy_reshape(
*,
dtypes_x_shape,
order,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtypes, x, shape = dtypes_x_shape
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
newshape=shape,
order=order,
)
@handle_frontend_test(
fn_tree="numpy.resize",
dtypes_x_shape=dtype_and_resize(),
)
def test_numpy_resize(
*,
dtypes_x_shape,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x, new_shape = dtypes_x_shape
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
newshape=new_shape,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_changing_array_shape.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_changing_array_shape.py",
"repo_id": "ivy",
"token_count": 4373
} | 55 |
# global
from hypothesis import assume, strategies as st
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
import ivy
# --- Helpers --- #
# --------------- #
@st.composite
def _get_clip_inputs(draw):
shape = draw(
helpers.get_shape(
min_num_dims=1, max_num_dims=5, min_dim_size=2, max_dim_size=10
)
)
x_dtype, x, casting, dtype = draw(
np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=shape,
)
],
),
)
min = draw(
helpers.array_values(dtype=x_dtype[0], shape=shape, min_value=-50, max_value=5)
)
max = draw(
helpers.array_values(dtype=x_dtype[0], shape=shape, min_value=6, max_value=50)
)
return x_dtype, x, min, max, casting, dtype
# --- Main --- #
# ------------ #
# absolute
@handle_frontend_test(
fn_tree="numpy.absolute",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="absolute"
),
)
def test_numpy_absolute(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# cbrt
@handle_frontend_test(
fn_tree="numpy.cbrt",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="cbrt"
),
)
def test_numpy_cbrt(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# clip
@handle_frontend_test(
fn_tree="numpy.clip",
input_and_ranges=_get_clip_inputs(),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="clip"
),
test_with_out=st.just(False),
)
def test_numpy_clip(
input_and_ranges,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, min, max, casting, dtype = input_and_ranges
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
a_min=min,
a_max=max,
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
@handle_frontend_test(
fn_tree="numpy.convolve",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
max_num_dims=1,
num_arrays=2,
min_value=-10,
max_value=10,
shared_dtype=True,
),
mode=st.sampled_from(["valid", "same", "full"]),
test_with_out=st.just(False),
)
def test_numpy_convolve(
dtype_and_x,
mode,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=xs[0],
v=xs[1],
mode=mode,
)
# copysign
@handle_frontend_test(
fn_tree="numpy.copysign",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
min_value=-100,
max_value=100,
)
],
),
where=np_frontend_helpers.where(),
test_with_out=st.just(False),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="copysign"
),
)
def test_numpy_copysign(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
x1=xs[0],
x2=xs[1],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# fabs
@handle_frontend_test(
fn_tree="numpy.fabs",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="fabs"
),
)
def test_numpy_fabs(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# gcd
@handle_frontend_test(
fn_tree="numpy.gcd",
dtype_and_inputs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
shared_dtype=False,
min_num_dims=1,
max_num_dims=3,
min_value=-100,
max_value=100,
allow_nan=False,
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="gcd"
),
)
def test_numpy_gcd(
dtype_and_inputs,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs = dtype_and_inputs
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=xs[0],
x2=xs[1],
out=None,
where=where,
)
# heaviside
@handle_frontend_test(
fn_tree="numpy.heaviside",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="heaviside"
),
)
def test_numpy_heaviside(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, (x1_list, x2_list), casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x1_list,
x2=x2_list,
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# interp
@handle_frontend_test(
fn_tree="numpy.interp",
xp_and_fp=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_num_dims=1,
max_num_dims=1,
min_dim_size=3,
min_value=-10000,
max_value=10000,
),
x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
left=st.one_of(st.none(), st.floats()),
right=st.one_of(st.none(), st.floats()),
period=st.one_of(
st.none(),
st.floats(
allow_nan=False,
allow_infinity=False,
allow_subnormal=False,
min_value=0.1,
max_value=1.0e5,
exclude_min=True,
),
),
test_with_out=st.just(False),
)
def test_numpy_interp(
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
xp_and_fp,
x,
left,
right,
period,
):
input_dtypes, xp_fp = xp_and_fp
xp = ivy.array(xp_fp[0])
fp = ivy.array(xp_fp[1])
if period is None:
xp_order = ivy.argsort(xp)
xp = xp[xp_order]
fp = fp[xp_order]
previous = xp[0]
for i in xp[1:]:
assume(i > previous)
previous = i
x_dtype, x = x
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes + x_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
xp=xp,
fp=fp,
left=left,
right=right,
period=period,
)
# lcm
@handle_frontend_test(
fn_tree="numpy.lcm",
dtype_and_inputs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
shared_dtype=True,
min_num_dims=1,
max_num_dims=3,
min_value=-100,
max_value=100,
allow_nan=False,
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="lcm"
),
)
def test_numpy_lcm(
dtype_and_inputs,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs = dtype_and_inputs
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=xs[0],
x2=xs[1],
out=None,
where=where,
)
# nan_to_num
@handle_frontend_test(
fn_tree="numpy.nan_to_num",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-np.inf,
max_value=+np.inf,
allow_nan=True,
),
posinf=st.one_of(st.none(), st.floats(min_value=0, max_value=10000)),
neginf=st.one_of(st.none(), st.floats(min_value=-10000, max_value=0)),
nan=st.floats(min_value=0, max_value=10),
copy=st.booleans(),
test_with_out=st.just(False),
test_with_copy=st.just(True),
)
def test_numpy_nan_to_num(
dtype_and_x,
copy,
nan,
fn_tree,
frontend,
test_flags,
backend_fw,
on_device,
posinf,
neginf,
):
input_dtype, x = dtype_and_x
    # To avoid overflow errors on the tensorflow backend (a tensor close to
    # tf.dtype.max or tf.dtype.min can't easily be created), require that
    # posinf and neginf are explicitly provided.
    if backend_fw == "tensorflow":
assume(posinf is not None and neginf is not None)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
copy=copy,
nan=nan,
posinf=posinf,
neginf=neginf,
)
# real_if_close
@handle_frontend_test(
fn_tree="numpy.real_if_close",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
min_dim_size=1,
min_num_dims=1,
).filter(lambda x: "bfloat16" not in x[0]),
tol=st.integers(min_value=1, max_value=1000),
test_with_out=st.just(False),
)
def test_numpy_real_if_close(
dtype_and_x,
tol,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
tol=tol,
)
# reciprocal
@handle_frontend_test(
fn_tree="numpy.reciprocal",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="reciprocal"
),
)
def test_numpy_reciprocal(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
atol=1e-2,
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# sign
@handle_frontend_test(
fn_tree="numpy.sign",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="sign"
),
)
def test_numpy_sign(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# sqrt
@handle_frontend_test(
fn_tree="numpy.sqrt",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="sqrt"
),
)
def test_numpy_sqrt(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
atol=1e-2,
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# square
@handle_frontend_test(
fn_tree="numpy.square",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="square"
),
)
def test_numpy_square(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_miscellaneous.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_miscellaneous.py",
"repo_id": "ivy",
"token_count": 10471
} | 56 |
import hypothesis.extra.numpy as hnp
from hypothesis import strategies as st
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
@st.composite
def _broadcastable_trio(draw):
dtype = draw(helpers.get_dtypes("valid", full=False))
shapes_st = draw(
hnp.mutually_broadcastable_shapes(num_shapes=3, min_dims=1, min_side=1)
)
cond_shape, x1_shape, x2_shape = shapes_st.input_shapes
cond = draw(hnp.arrays(hnp.boolean_dtypes(), cond_shape))
x1 = draw(helpers.array_values(dtype=dtype[0], shape=x1_shape))
x2 = draw(helpers.array_values(dtype=dtype[0], shape=x2_shape))
return cond, x1, x2, (dtype * 2)
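# Example draw (illustrative): mutually broadcastable shapes such as
# cond: (3, 1), x1: (1, 4), x2: (3, 4) are generated, so broadcasting between
# the three operands is always valid for ``np.where``.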
@st.composite
def _extract_strategy(draw):
dtype_and_cond = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
)
)
dtype_and_arr = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
)
)
return dtype_and_cond, dtype_and_arr
# searchsorted
@st.composite
def _search_sorted_values(draw):
case = st.booleans()
if case:
# when x is 1-D and v is N-D
dtype_x, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(
"numeric", full=False, key="searchsorted"
),
shape=(draw(st.integers(min_value=1, max_value=5)),),
),
)
dtype_v, v = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(
"numeric", full=False, key="searchsorted"
),
min_num_dims=1,
)
)
else:
# when x is N-D and v is N-D
lead_dim = draw(
helpers.get_shape(min_num_dims=1),
)
nx = draw(st.integers(min_value=1, max_value=5))
nv = draw(st.integers(min_value=1, max_value=5))
dtype_x, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(
"numeric", full=False, key="searchsorted"
),
shape=lead_dim + (nx,),
),
)
dtype_v, v = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(
"numeric", full=False, key="searchsorted"
),
shape=lead_dim + (nv,),
),
)
input_dtypes = dtype_x + dtype_v
xs = x + v
side = draw(st.sampled_from(["left", "right"]))
use_sorter = draw(st.booleans())
if use_sorter:
sorter_dtype = draw(st.sampled_from(["int32", "int64"]))
input_dtypes.append(sorter_dtype)
sorter = np.argsort(xs[0], axis=-1).astype(sorter_dtype)
else:
sorter = None
xs[0] = np.sort(xs[0], axis=-1)
return input_dtypes, xs, side, sorter
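# Note: when ``use_sorter`` is drawn, the last axis of x is left unsorted and
# an argsort index array is supplied instead; otherwise x is pre-sorted along
# axis -1, matching ``np.searchsorted``'s requirement that the input be sorted
# or accompanied by a sorter.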
# --- Main --- #
# ------------ #
# argmax
@handle_frontend_test(
fn_tree="numpy.argmax",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
keep_dims=st.booleans(),
test_with_out=st.just(False),
)
def test_numpy_argmax(
dtype_x_axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
keep_dims,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
keepdims=keep_dims,
)
# argmin
@handle_frontend_test(
fn_tree="numpy.argmin",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
keep_dims=st.booleans(),
test_with_out=st.just(False),
)
def test_numpy_argmin(
dtype_x_axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
keep_dims,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
keepdims=keep_dims,
)
# argwhere
@handle_frontend_test(
fn_tree="numpy.argwhere",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
test_with_out=st.just(False),
)
def test_numpy_argwhere(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
)
# extract
@handle_frontend_test(
fn_tree="numpy.extract",
dtype_and_x=_extract_strategy(),
test_with_out=st.just(False),
)
def test_numpy_extract(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype_cond, cond = dtype_and_x[0]
dtype_arr, arr = dtype_and_x[1]
helpers.test_frontend_function(
input_dtypes=dtype_cond + dtype_arr,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
cond=cond[0],
arr=arr[0],
)
# flatnonzero
@handle_frontend_test(
fn_tree="numpy.flatnonzero",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_numpy_flatnonzero(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
)
# nanargmax
@handle_frontend_test(
fn_tree="numpy.nanargmax",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
keep_dims=st.booleans(),
test_with_out=st.just(False),
)
def test_numpy_nanargmax(
dtype_x_axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
keep_dims,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
keepdims=keep_dims,
)
# nanargmin
@handle_frontend_test(
fn_tree="numpy.nanargmin",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
keep_dims=st.booleans(),
test_with_out=st.just(False),
)
def test_numpy_nanargmin(
dtype_x_axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
keep_dims,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
keepdims=keep_dims,
)
# nonzero
@handle_frontend_test(
fn_tree="numpy.nonzero",
dtype_and_a=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
test_with_out=st.just(False),
)
def test_numpy_nonzero(
dtype_and_a,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, a = dtype_and_a
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=a[0],
)
@handle_frontend_test(
fn_tree="numpy.searchsorted",
dtype_x_v_side_sorter=_search_sorted_values(),
test_with_out=st.just(False),
)
def test_numpy_searchsorted(
dtype_x_v_side_sorter,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs, side, sorter = dtype_x_v_side_sorter
helpers.test_frontend_function(
input_dtypes=input_dtypes + ["int64"],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=xs[0],
v=xs[1],
side=side,
sorter=sorter,
)
# where
@handle_frontend_test(
fn_tree="numpy.where",
broadcastables=_broadcastable_trio(),
test_with_out=st.just(False),
)
def test_numpy_where(
broadcastables,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
cond, x1, x2, dtype = broadcastables
helpers.test_frontend_function(
input_dtypes=["bool", dtype],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
cond=cond,
x1=x1,
x2=x2,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_sorting_searching_counting/test_searching.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_sorting_searching_counting/test_searching.py",
"repo_id": "ivy",
"token_count": 5148
} | 57 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.helpers.globals as test_globals
from ivy_tests.test_ivy.helpers import handle_frontend_test, BackendHandler
# --- Helpers --- #
# --------------- #
@st.composite
def _input_fill_and_dtype(draw):
dtype = draw(helpers.get_dtypes("float", full=False))
with BackendHandler.update_backend(test_globals.CURRENT_BACKEND) as ivy_backend:
dtype_and_input = draw(helpers.dtype_and_values(dtype=dtype))
if ivy_backend.is_uint_dtype(dtype[0]):
fill_values = draw(st.integers(min_value=0, max_value=5))
elif ivy_backend.is_int_dtype(dtype[0]):
fill_values = draw(st.integers(min_value=-5, max_value=5))
else:
fill_values = draw(st.floats(min_value=-5, max_value=5))
dtype_to_cast = draw(helpers.get_dtypes("float", full=False))
return dtype, dtype_and_input[1], fill_values, dtype_to_cast[0]
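# Note: the fill value is sampled per dtype kind -- [0, 5] for unsigned ints so
# the cast can never underflow, [-5, 5] for signed ints, [-5.0, 5.0] for floats.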
# --- Main --- #
# ------------ #
# arange
@handle_frontend_test(
fn_tree="paddle.arange",
start=helpers.ints(min_value=-50, max_value=0),
end=helpers.ints(min_value=1, max_value=50),
step=helpers.ints(min_value=1, max_value=5),
dtype=helpers.get_dtypes("float"),
test_with_out=st.just(False),
)
def test_paddle_arange(
start,
end,
step,
dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
start=start,
end=end,
step=step,
dtype=dtype[0],
)
# assign
@handle_frontend_test(
fn_tree="paddle.assign",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(True),
)
def test_paddle_assign(
dtype_and_x,
test_flags,
backend_fw,
frontend,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
output=x[1],
)
# clone
@handle_frontend_test(
fn_tree="paddle.clone",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
test_with_copy=st.just(True),
)
def test_paddle_clone(
*,
dtype_and_x,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# complex
@handle_frontend_test(
fn_tree="paddle.complex",
dtype_and_arrays=helpers.dtype_and_values(
available_dtypes=["float32", "float64"], shared_dtype=True, num_arrays=2
),
)
def test_paddle_complex(
dtype_and_arrays,
test_flags,
backend_fw,
frontend,
fn_tree,
on_device,
):
input_dtype, (real, imag) = dtype_and_arrays
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
real=real,
imag=imag,
)
# diag
@handle_frontend_test(
fn_tree="paddle.diag",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=2,
min_dim_size=1,
max_dim_size=5,
),
k=helpers.ints(min_value=-1, max_value=1),
p=st.one_of(
helpers.ints(min_value=-25, max_value=25),
helpers.floats(min_value=-25, max_value=25),
),
)
def test_paddle_diag(
dtype_and_x,
k,
p,
backend_fw,
frontend,
test_flags,
fn_tree,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
offset=k,
padding_value=p,
)
# diagflat
@handle_frontend_test(
fn_tree="paddle.diagflat",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
offset=st.integers(min_value=-4, max_value=4),
test_with_out=st.just(False),
)
def test_paddle_diagflat(
dtype_and_values,
offset,
test_flags,
backend_fw,
frontend,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_values
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
x=x[0],
offset=offset,
)
# empty
@handle_frontend_test(
fn_tree="paddle.empty",
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
dtype=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_paddle_empty(
shape,
dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
shape=shape,
dtype=dtype[0],
)
# empty_like
@handle_frontend_test(
fn_tree="paddle.empty_like",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
dtype=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_paddle_empty_like(
dtype_and_x,
dtype,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
x=x[0],
dtype=dtype[0],
)
# eye
@handle_frontend_test(
fn_tree="paddle.eye",
num_rows=helpers.ints(min_value=3, max_value=10),
num_columns=st.none() | helpers.ints(min_value=3, max_value=10),
dtypes=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_paddle_eye(
*,
num_rows,
num_columns,
dtypes,
on_device,
fn_tree,
test_flags,
frontend,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
num_rows=num_rows,
num_columns=num_columns,
dtype=dtypes[0],
)
# full
@handle_frontend_test(
fn_tree="paddle.full",
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
input_fill_dtype=_input_fill_and_dtype(),
test_with_out=st.just(False),
)
def test_paddle_full(
shape,
input_fill_dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, fill, dtype_to_cast = input_fill_dtype
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
shape=shape,
fill_value=fill,
dtype=dtype_to_cast,
)
# full_like
@handle_frontend_test(
fn_tree="paddle.full_like",
input_fill_dtype=_input_fill_and_dtype(),
test_with_out=st.just(False),
)
def test_paddle_full_like(
input_fill_dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, fill, dtype_to_cast = input_fill_dtype
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
fill_value=fill,
dtype=dtype_to_cast,
)
# linspace
@handle_frontend_test(
fn_tree="paddle.linspace",
start=helpers.floats(min_value=-10, max_value=10),
stop=helpers.floats(min_value=-10, max_value=10),
num=helpers.ints(min_value=1, max_value=5),
dtype=helpers.get_dtypes("float"),
test_with_out=st.just(False),
)
def test_paddle_linspace(
start,
stop,
num,
dtype,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
start=start,
stop=stop,
num=num,
dtype=dtype[0],
)
# logspace
@handle_frontend_test(
fn_tree="paddle.logspace",
start=helpers.floats(min_value=-10, max_value=10),
stop=helpers.floats(min_value=-10, max_value=10),
num=helpers.ints(min_value=1, max_value=5),
base=st.floats(min_value=0.1, max_value=10.0),
dtype=helpers.get_dtypes("float"),
test_with_out=st.just(False),
)
def test_paddle_logspace(
start,
stop,
num,
base,
dtype,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
start=start,
stop=stop,
num=num,
base=base,
dtype=dtype[0],
)
@handle_frontend_test(
fn_tree="paddle.meshgrid",
dtype_and_arrays=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=st.integers(min_value=2, max_value=5),
min_num_dims=1,
max_num_dims=1,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_paddle_meshgrid(
dtype_and_arrays,
test_flags,
backend_fw,
frontend,
fn_tree,
on_device,
):
input_dtype, arrays = dtype_and_arrays
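    # ``paddle.meshgrid`` is variadic, so each drawn array is forwarded as a
    # separate positional argument (x0, x1, ...).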
    args = {}
    for i, x_ in enumerate(arrays):
        args[f"x{i}"] = x_
test_flags.num_positional_args = len(arrays)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**args,
)
# ones
@handle_frontend_test(
fn_tree="paddle.ones",
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
dtype=helpers.get_dtypes("valid"),
test_with_out=st.just(False),
)
def test_paddle_ones(
shape,
dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
shape=shape,
dtype=dtype[0],
)
# ones_like
@handle_frontend_test(
fn_tree="paddle.ones_like",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
dtype=helpers.get_dtypes("valid"),
test_with_out=st.just(False),
)
def test_paddle_ones_like(
dtype_and_x,
dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
dtype=dtype[0],
)
# Tests #
# ----- #
# to_tensor
@handle_frontend_test(
fn_tree="paddle.to_tensor",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
dtype=helpers.get_dtypes("valid"),
)
def test_paddle_to_tensor(
*,
dtype_and_x,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
data=input[0],
dtype=dtype[0],
place=on_device,
)
# tril
@handle_frontend_test(
fn_tree="paddle.tril",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
),
diagonal=st.integers(min_value=-100, max_value=100),
)
def test_paddle_tril(
*,
dtype_and_values,
diagonal,
backend_fw,
on_device,
fn_tree,
frontend,
test_flags,
):
dtype, values = dtype_and_values
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=values[0],
diagonal=diagonal,
)
# tril_indices
@handle_frontend_test(
fn_tree="paddle.tril_indices",
dtype=helpers.get_dtypes("valid", full=False),
row=st.integers(min_value=1, max_value=5),
col=st.integers(min_value=1, max_value=5),
offset=st.integers(min_value=-4, max_value=4),
test_with_out=st.just(False),
)
def test_paddle_tril_indices(
row,
col,
offset,
dtype,
test_flags,
backend_fw,
frontend,
fn_tree,
on_device,
):
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
row=row,
col=col,
offset=offset,
dtype=dtype[0],
)
# triu
@handle_frontend_test(
fn_tree="paddle.triu",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
),
diagonal=st.integers(min_value=-100, max_value=100),
)
def test_paddle_triu(
*,
dtype_and_values,
diagonal,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
dtype, values = dtype_and_values
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=values[0],
diagonal=diagonal,
)
# triu_indices
@handle_frontend_test(
fn_tree="paddle.triu_indices",
dtype=helpers.get_dtypes("valid", full=False),
row=st.integers(min_value=1, max_value=5),
col=st.integers(min_value=1, max_value=5),
offset=st.integers(min_value=-4, max_value=4),
test_with_out=st.just(False),
)
def test_paddle_triu_indices(
row,
col,
offset,
dtype,
test_flags,
backend_fw,
frontend,
fn_tree,
on_device,
):
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
row=row,
col=col,
offset=offset,
dtype=dtype[0],
)
# zeros
@handle_frontend_test(
fn_tree="paddle.zeros",
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
dtype=helpers.get_dtypes("valid"),
test_with_out=st.just(False),
)
def test_paddle_zeros(
shape,
dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
shape=shape,
dtype=dtype[0],
)
# zeros_like
@handle_frontend_test(
fn_tree="paddle.zeros_like",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
dtype=helpers.get_dtypes("valid"),
test_with_out=st.just(False),
)
def test_paddle_zeros_like(
dtype_and_x,
dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
dtype=dtype[0],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_creation.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_creation.py",
"repo_id": "ivy",
"token_count": 9079
} | 58 |
# global
import ivy
from hypothesis import assume, strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
@st.composite
def _affine_grid_helper(draw):
align_corners = draw(st.booleans())
dims = draw(st.integers(4, 5))
if dims == 4:
size = draw(
st.tuples(
st.integers(1, 20),
st.integers(1, 20),
st.integers(2, 20),
st.integers(2, 20),
)
)
theta_dtype, theta = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=1,
shape=(size[0], 2, 3),
)
)
return theta_dtype, theta[0], size, align_corners
else:
size = draw(
st.tuples(
st.integers(1, 20),
st.integers(1, 20),
st.integers(2, 20),
st.integers(2, 20),
st.integers(2, 20),
)
)
theta_dtype, theta = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=1,
shape=(size[0], 3, 4),
)
)
return theta_dtype, theta[0], size, align_corners
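# Note (illustrative): ``affine_grid`` expects theta of shape (N, 2, 3) for a
# 4-D output size (N, C, H, W) and (N, 3, 4) for a 5-D size (N, C, D, H, W),
# which is what the two branches above generate.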
@st.composite
def _image_shape_helper(draw, data_format):
    # ``data_format`` may be passed as a fixed string or as a strategy.
    if not isinstance(data_format, str):
        data_format = draw(data_format)
    n = draw(helpers.ints(min_value=1, max_value=10), label="batch")
c = draw(st.sampled_from([1, 3]), label="channel")
h = draw(helpers.ints(min_value=1, max_value=100), label="height")
w = draw(helpers.ints(min_value=1, max_value=100), label="width")
if data_format == "NCHW":
shape = (n, c, h, w)
else:
shape = (n, h, w, c)
return shape
# --- Main --- #
# ------------ #
@handle_frontend_test(
fn_tree="paddle.nn.functional.affine_grid",
dtype_and_input_and_other=_affine_grid_helper(),
)
def test_paddle_affine_grid(
*, dtype_and_input_and_other, on_device, backend_fw, fn_tree, frontend, test_flags
):
dtype, theta, size, align_corners = dtype_and_input_and_other
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
theta=theta,
out_shape=size,
align_corners=align_corners,
)
# channel_shuffle
@handle_frontend_test(
fn_tree="paddle.nn.functional.channel_shuffle",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=["float32", "float64"],
        shape=_image_shape_helper(
            data_format=st.shared(st.sampled_from(["NCHW", "NHWC"]), key="cs_df")
        ),
),
groups=helpers.ints(min_value=1),
    data_format=st.shared(st.sampled_from(["NCHW", "NHWC"]), key="cs_df"),
)
def test_paddle_channel_shuffle(
*,
dtype_and_x,
groups,
data_format,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
if data_format == "NCHW":
assume(ivy.shape(x[0])[1] % groups == 0)
else:
assume(ivy.shape(x[0])[3] % groups == 0)
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
on_device=on_device,
frontend=frontend,
fn_tree=fn_tree,
x=x[0],
groups=groups,
data_format=data_format,
)
# pixel_shuffle
@handle_frontend_test(
fn_tree="paddle.nn.functional.pixel_shuffle",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=["float32", "float64"],
min_value=0,
min_num_dims=4,
max_num_dims=4,
min_dim_size=3,
),
factor=helpers.ints(min_value=1),
data_format=st.sampled_from(["NCHW", "NHWC"]),
)
def test_paddle_pixel_shuffle(
*,
dtype_and_x,
factor,
data_format,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
if data_format == "NCHW":
assume(ivy.shape(x[0])[1] % (factor**2) == 0)
else:
assume(ivy.shape(x[0])[3] % (factor**2) == 0)
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
upscale_factor=factor,
data_format=data_format,
backend_to_test=backend_fw,
)
# pixel_unshuffle
@handle_frontend_test(
fn_tree="paddle.nn.functional.pixel_unshuffle",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=["float32", "float64"],
min_value=0,
min_num_dims=4,
max_num_dims=4,
min_dim_size=3,
),
factor=helpers.ints(min_value=1),
data_format=st.sampled_from(["NCHW", "NHWC"]),
)
def test_paddle_pixel_unshuffle(
*,
dtype_and_x,
factor,
data_format,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
if data_format == "NCHW":
assume(ivy.shape(x[0])[2] % factor == 0)
assume(ivy.shape(x[0])[3] % factor == 0)
else:
assume(ivy.shape(x[0])[1] % factor == 0)
assume(ivy.shape(x[0])[2] % factor == 0)
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
downscale_factor=factor,
data_format=data_format,
backend_to_test=backend_fw,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_vision.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_vision.py",
"repo_id": "ivy",
"token_count": 2970
} | 59 |
from ivy_tests.test_ivy.test_frontends import NativeClass
scipy_classes_to_ivy_classes = {}
def convscipy(argument):
"""Convert NativeClass in argument to ivy frontend counterpart for
scipy."""
if isinstance(argument, NativeClass):
return scipy_classes_to_ivy_classes.get(argument._native_class)
return argument
| ivy/ivy_tests/test_ivy/test_frontends/test_scipy/__init__.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_scipy/__init__.py",
"repo_id": "ivy",
"token_count": 121
} | 60 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
@st.composite
def _valid_idct(draw):
dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=["float32", "float64"],
max_value=65280,
min_value=-65280,
min_num_dims=1,
min_dim_size=2,
shared_dtype=True,
)
)
n = None
axis = -1
norm = draw(st.sampled_from([None, "ortho"]))
type = draw(helpers.ints(min_value=1, max_value=4))
if norm == "ortho" and type == 1:
norm = None
return dtype, x, type, n, axis, norm
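# Note: DCT type I does not support norm="ortho" in ``tf.signal``, so a drawn
# (type=1, norm="ortho") combination is downgraded to norm=None above.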
@st.composite
def _valid_stft(draw):
dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=["float32", "float64"],
max_value=65280,
min_value=-65280,
min_num_dims=1,
min_dim_size=2,
shared_dtype=True,
)
)
frame_length = draw(helpers.ints(min_value=16, max_value=100))
frame_step = draw(helpers.ints(min_value=1, max_value=50))
return dtype, x, frame_length, frame_step
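# Illustrative note (an assumption about tf.signal framing, not executed by the
# tests): with ``pad_end=True`` a length-N signal is split into roughly
# ceil(N / frame_step) frames of ``frame_length`` samples each, with the tail
# zero-padded as needed.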
# --- Main --- #
# ------------ #
# dct
@handle_frontend_test(
fn_tree="tensorflow.signal.dct",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=["float32", "float64"],
max_value=65280,
min_value=-65280,
min_num_dims=1,
min_dim_size=2,
shared_dtype=True,
),
n=helpers.ints(min_value=1, max_value=3),
norm=st.sampled_from([None, "ortho"]),
type=helpers.ints(min_value=1, max_value=4),
# dtype_x_and_args=_valid_idct(),
test_with_out=st.just(False),
)
def test_tensorflow_dct(
*,
dtype_and_x,
n,
norm,
type,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
(
input_dtype,
x,
) = dtype_and_x
if norm == "ortho" and type == 1:
norm = None
axis = -1
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
type=type,
n=n,
axis=axis,
norm=norm,
# atol=1e-01,
)
# idct
@handle_frontend_test(
fn_tree="tensorflow.signal.idct",
dtype_x_and_args=_valid_idct(),
test_with_out=st.just(False),
)
def test_tensorflow_idct(
*,
dtype_x_and_args,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, type, n, axis, norm = dtype_x_and_args
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
type=type,
n=n,
axis=axis,
norm=norm,
atol=1e-01,
)
# kaiser_bessel_derived_window
@handle_frontend_test(
fn_tree="tensorflow.signal.kaiser_bessel_derived_window",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
max_num_dims=0,
min_value=1,
max_value=10,
),
beta=st.floats(min_value=1, max_value=5),
# dtype=helpers.get_dtypes("float", full=False),
test_with_out=st.just(False),
)
def test_tensorflow_kaiser_bessel_derived_window(
*,
dtype_and_x,
beta,
test_flags,
backend_fw,
fn_tree,
on_device,
frontend, # dtype
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
window_length=int(x[0]),
beta=beta,
# dtype=dtype[0],
)
# kaiser_window
@handle_frontend_test(
fn_tree="tensorflow.signal.kaiser_window",
dtype_and_window_length=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer")
),
dtype_and_beta=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric")
),
dtype=helpers.get_dtypes("numeric"),
test_with_out=st.just(False),
)
def test_tensorflow_kaiser_window(
*,
dtype_and_window_length,
dtype_and_beta,
dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
window_length_dtype, window_length = dtype_and_window_length
beta_dtype, beta = dtype_and_beta
helpers.test_frontend_function(
input_dtypes=[window_length_dtype[0], beta_dtype[0]],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
window_length=window_length,
beta=beta,
dtype=dtype,
)
# stft
@handle_frontend_test(
fn_tree="tensorflow.signal.stft",
dtype_x_and_args=_valid_stft(),
test_with_out=st.just(False),
)
def test_tensorflow_stft(
*,
dtype_x_and_args,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, frame_length, frame_step = dtype_x_and_args
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
signals=x[0],
frame_length=frame_length,
frame_step=frame_step,
fft_length=None,
window_fn=None,
pad_end=True,
atol=1e-02,
rtol=1e-02,
)
# vorbis_window
@handle_frontend_test(
fn_tree="tensorflow.signal.vorbis_window",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
max_num_dims=0,
min_value=1,
max_value=10,
),
# dtype=helpers.get_dtypes("float", full=False),
test_with_out=st.just(False),
)
def test_tensorflow_vorbis_window(
*, dtype_and_x, test_flags, backend_fw, fn_tree, on_device, frontend # ,dtype
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-02,
window_length=int(x[0]),
# dtype=dtype[0],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_signal.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_signal.py",
"repo_id": "ivy",
"token_count": 3359
} | 61 |
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
@handle_frontend_test(
fn_tree="torch.special.erfcx",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_erfcx(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_special_funcs.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_special_funcs.py",
"repo_id": "ivy",
"token_count": 335
} | 62 |
"""Collection of tests for elementwise functions."""
# global
import math
import numpy as np
from hypothesis import assume
from hypothesis import strategies as st
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.helpers.globals as test_globals
from ivy_tests.test_ivy.helpers import handle_test
from ivy_tests.test_ivy.helpers.pipeline_helper import BackendHandler
_one = np.asarray(1, dtype="uint8")
_zero = np.asarray(0, dtype="uint8")
# --- Helpers --- #
# --------------- #
# this helper is not used yet; it will be used when the ``where`` argument is
# added back to elementwise functions
@st.composite
def _array_with_mask(draw):
dtype, x, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), ret_shape=True
)
)
dtype2, where = draw(
helpers.dtype_and_values(available_dtypes=["bool"], shape=shape)
)
return ([dtype[0], dtype2[0]], x, where)
# trapz
@st.composite
def _either_x_dx(draw):
    rand = draw(st.integers(min_value=0, max_value=1))
if rand == 0:
either_x_dx = draw(
helpers.dtype_and_values(
available_dtypes=st.shared(
helpers.get_dtypes("float"), key="trapz_dtype"
),
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
)
)
return rand, either_x_dx
else:
either_x_dx = draw(
st.floats(min_value=-10, max_value=10),
)
return rand, either_x_dx
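# Illustrative sketch (hypothetical helper, not used by the tests): ``trapz``
# accepts either explicit sample points ``x`` or a uniform spacing ``dx``; for
# evenly spaced samples the two parameterisations integrate to the same value.
def _demo_trapz_x_vs_dx():
    y = np.asarray([0.0, 1.0, 2.0])
    x = np.asarray([0.0, 0.5, 1.0])
    via_x = np.sum((y[1:] + y[:-1]) / 2 * np.diff(x))  # trapezoid rule with x
    via_dx = np.sum((y[1:] + y[:-1]) / 2 * 0.5)  # same rule with dx = 0.5
    assert np.isclose(via_x, via_dx)  # both equal 1.0
    return via_x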
@st.composite
def min_max_helper(draw):
use_where = draw(st.booleans())
if use_where:
dtype_and_x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric", full=False),
num_arrays=2,
small_abs_safety_factor=6,
large_abs_safety_factor=6,
safety_factor_scale="log",
)
)
else:
dtype_and_x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric", full=False),
num_arrays=2,
min_value=-1e5,
max_value=1e5,
small_abs_safety_factor=6,
large_abs_safety_factor=6,
safety_factor_scale="log",
)
)
return dtype_and_x, use_where
@st.composite
def pow_helper(draw, available_dtypes=None):
if available_dtypes is None:
available_dtypes = helpers.get_dtypes("numeric")
dtype1, x1 = draw(
helpers.dtype_and_values(
available_dtypes=available_dtypes,
small_abs_safety_factor=16,
large_abs_safety_factor=16,
safety_factor_scale="log",
)
)
dtype1 = dtype1[0]
def cast_filter(dtype1_x1_dtype2):
dtype1, _, dtype2 = dtype1_x1_dtype2
with BackendHandler.update_backend(test_globals.CURRENT_BACKEND) as ivy_backend:
if ivy_backend.can_cast(dtype1, dtype2):
return True
return False
dtype1, x1, dtype2 = draw(
helpers.get_castable_dtype(draw(available_dtypes), dtype1, x1).filter(
cast_filter
)
)
with BackendHandler.update_backend(test_globals.CURRENT_BACKEND) as ivy_backend:
if ivy_backend.is_int_dtype(dtype2):
max_val = ivy_backend.iinfo(dtype2).max
else:
max_val = ivy_backend.finfo(dtype2).max
max_x1 = np.max(np.abs(x1[0]))
if max_x1 in [0, 1]:
max_value = None
else:
max_value = int(math.log(max_val) / math.log(max_x1))
if abs(max_value) > abs(max_val) / 40 or max_value < 0:
max_value = None
dtype_and_x2 = draw(
st.one_of(
helpers.dtype_and_values(
small_abs_safety_factor=16,
large_abs_safety_factor=16,
safety_factor_scale="log",
max_value=max_value,
dtype=[dtype2],
),
st.floats(max_value=max_value),
st.integers(max_value=max_value),
)
)
input_dtypes = [dtype1]
if isinstance(dtype_and_x2, tuple):
input_dtypes += dtype_and_x2[0]
x2 = dtype_and_x2[1][0]
else:
x2 = dtype_and_x2
return input_dtypes, [x1[0], x2]
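# Illustrative sketch (hypothetical helper, not used by the tests): the
# exponent cap in ``pow_helper`` solves max_x1 ** e <= max_val for e, i.e.
# e = log(max_val) / log(max_x1), keeping x1 ** x2 representable.
def _demo_pow_exponent_cap():
    max_val = np.iinfo("int32").max  # 2147483647
    cap = int(math.log(max_val) / math.log(7.0))  # floor(11.04...) = 11
    assert 7**cap <= max_val < 7 ** (cap + 1)
    return cap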
# --- Main --- #
# ------------ #
def not_too_close_to_zero(x):
return np.where(np.isclose(x, 0), x + 1, x)
# abs
@handle_test(
fn_tree="functional.ivy.abs",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
)
def test_abs(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
# acos
@handle_test(
fn_tree="functional.ivy.acos",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
large_abs_safety_factor=4,
small_abs_safety_factor=4,
safety_factor_scale="log",
),
)
def test_acos(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x=x[0],
)
# acosh
@handle_test(
fn_tree="functional.ivy.acosh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
min_value=1,
large_abs_safety_factor=2.1,
small_abs_safety_factor=2.1,
safety_factor_scale="log",
),
)
def test_acosh(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x=x[0],
)
# add
@handle_test(
fn_tree="functional.ivy.add",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
),
alpha=st.integers(min_value=1, max_value=5),
)
def test_add(*, dtype_and_x, alpha, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
rtol_=1e-1,
atol_=1e-1,
on_device=on_device,
x1=x[0],
x2=x[1],
alpha=alpha,
)
# angle
@handle_test(
fn_tree="functional.ivy.angle",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=["float64"],
min_value=-5,
max_value=5,
max_dim_size=5,
max_num_dims=5,
min_dim_size=1,
min_num_dims=1,
allow_inf=False,
allow_nan=False,
),
deg=st.booleans(),
test_gradients=st.just(False),
)
def test_angle(
*,
dtype_and_x,
deg,
test_flags,
backend_fw,
fn_name,
on_device,
):
input_dtype, z = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
z=z[0],
deg=deg,
)
# asin
@handle_test(
fn_tree="functional.ivy.asin",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
safety_factor_scale="log",
large_abs_safety_factor=4,
small_abs_safety_factor=4,
),
)
def test_asin(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x=x[0],
)
# asinh
@handle_test(
fn_tree="functional.ivy.asinh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
large_abs_safety_factor=4,
small_abs_safety_factor=4,
safety_factor_scale="log",
),
)
def test_asinh(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x=x[0],
)
# atan
@handle_test(
fn_tree="functional.ivy.atan",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
large_abs_safety_factor=4,
small_abs_safety_factor=4,
safety_factor_scale="log",
),
)
def test_atan(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x=x[0],
)
# atan2
@handle_test(
fn_tree="functional.ivy.atan2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
)
def test_atan2(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
assume(not (np.any(np.isclose(x[0], 0)) or np.any(np.isclose(x[1], 0))))
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x1=x[0],
x2=x[1],
)
# atanh
@handle_test(
fn_tree="functional.ivy.atanh",
dtype_and_x=helpers.dtype_and_values(
min_value=1e-30,
max_value=1e30,
available_dtypes=helpers.get_dtypes("float_and_complex"),
),
)
def test_atanh(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x=x[0],
)
# bitwise_and
@handle_test(
fn_tree="functional.ivy.bitwise_and",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")),
num_arrays=2,
array_api_dtypes=True,
),
test_gradients=st.just(False),
)
def test_bitwise_and(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# bitwise_invert
@handle_test(
fn_tree="functional.ivy.bitwise_invert",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")),
array_api_dtypes=True,
),
test_gradients=st.just(False),
)
def test_bitwise_invert(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
# bitwise_left_shift
@handle_test(
fn_tree="functional.ivy.bitwise_left_shift",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
array_api_dtypes=True,
),
ground_truth_backend="numpy", # tensorflow gt has maximum shift that is equal
test_gradients=st.just(False),
)
def test_bitwise_left_shift(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
# negative shifts will throw an exception
# shifts >= dtype width produce backend-defined behavior
dtype = np.promote_types(input_dtype[0], input_dtype[1])
bit_cap = (
np.iinfo(dtype).bits
- np.maximum(np.ceil(np.log2(np.abs(x[0]))).astype(input_dtype[1]), 0)
- 1
)
bit_cap = np.iinfo(dtype).bits if "u" in dtype.name else bit_cap
x[1] = np.asarray(
np.clip(
x[1],
0,
bit_cap,
dtype=input_dtype[1],
)
)
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x[0],
x2=x[1],
)
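# Illustrative sketch (hypothetical helper, not used by the tests): the shift
# cap above keeps x1 << x2 representable. For a signed B-bit dtype, a value
# needing ceil(log2(|x1|)) bits can be shifted at most
# B - ceil(log2(|x1|)) - 1 positions before overflowing into the sign bit.
def _demo_left_shift_cap():
    x1 = np.int32(5)  # needs ceil(log2(5)) = 3 bits
    cap = 32 - 3 - 1  # = 28 safe shift positions
    assert np.left_shift(x1, cap) > 0  # still positive, no overflow
    return cap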
# bitwise_or
@handle_test(
fn_tree="functional.ivy.bitwise_or",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")),
num_arrays=2,
array_api_dtypes=True,
),
test_gradients=st.just(False),
)
def test_bitwise_or(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# bitwise_right_shift
@handle_test(
fn_tree="functional.ivy.bitwise_right_shift",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
array_api_dtypes=True,
),
test_gradients=st.just(False),
)
def test_bitwise_right_shift(
*, dtype_and_x, test_flags, backend_fw, fn_name, on_device
):
input_dtype, x = dtype_and_x
# negative shifts will throw an exception
# shifts >= dtype width produce backend-defined behavior
x[1] = np.asarray(
np.clip(x[1], 0, np.iinfo(input_dtype[1]).bits - 1), dtype=input_dtype[1]
)
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# bitwise_xor
@handle_test(
fn_tree="functional.ivy.bitwise_xor",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")),
num_arrays=2,
array_api_dtypes=True,
),
test_gradients=st.just(False),
)
def test_bitwise_xor(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# ceil
@handle_test(
fn_tree="functional.ivy.ceil",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
small_abs_safety_factor=3,
safety_factor_scale="linear",
),
)
def test_ceil(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
# cos
@handle_test(
fn_tree="functional.ivy.cos",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex")
),
)
def test_cos(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
# cosh
@handle_test(
fn_tree="functional.ivy.cosh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
),
)
def test_cosh(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
@handle_test(
fn_tree="functional.ivy.deg2rad",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
safety_factor_scale="log",
large_abs_safety_factor=2,
small_abs_safety_factor=2,
),
)
def test_deg2rad(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
atol_=1e-2,
rtol_=1e-2,
x=x[0],
)
# divide
@handle_test(
fn_tree="functional.ivy.divide",
test_gradients=st.just(False),
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric", full=False),
num_arrays=2,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
),
)
def test_divide(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
    # prevent the divisor from being too close to zero
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# equal
@handle_test(
fn_tree="functional.ivy.equal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", full=False), num_arrays=2
),
test_gradients=st.just(False),
)
def test_equal(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# Extra #
# ------#
# erf
@handle_test(
fn_tree="functional.ivy.erf",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
)
def test_erf(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x=x[0],
)
# exp
@handle_test(
fn_tree="functional.ivy.exp",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex")
),
)
def test_exp(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
# exp2
@handle_test(
fn_tree="functional.ivy.exp2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
min_value=-10,
max_value=10,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
test_gradients=st.just(False),
)
def test_exp2(dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x=np.asarray(x[0], dtype=input_dtype[0]),
)
# expm1
@handle_test(
fn_tree="functional.ivy.expm1",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
# Can't use linear or log safety factor, since the function is exponential,
# next best option is a hardcoded maximum that won't break any data type.
# expm1 is designed for very small values anyway
max_value=20.0,
),
)
def test_expm1(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x=x[0],
)
# floor
@handle_test(
fn_tree="functional.ivy.floor",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric")
),
)
def test_floor(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[0], 0)))
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
@handle_test(
fn_tree="functional.ivy.floor_divide",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=4,
safety_factor_scale="linear",
shared_dtype=True,
),
test_gradients=st.just(False),
)
def test_floor_divide(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
    # Make sure the divisor is not too close to zero
assume(not np.any(np.isclose(x[1], 0)))
    # Absolute tolerance is 1, since flooring near a rounding boundary can
    # introduce an absolute error of 1 due to limited precision
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x[0],
x2=x[1],
atol_=1,
)
# fmin
@handle_test(
fn_tree="functional.ivy.fmin",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_value=-10,
max_value=10,
num_arrays=2,
shared_dtype=False,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
allow_nan=True,
),
test_gradients=st.just(False),
)
def test_fmin(dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x1=x[0],
x2=x[1],
)
# fmod
@handle_test(
fn_tree="functional.ivy.fmod",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
shared_dtype=False,
large_abs_safety_factor=6,
small_abs_safety_factor=6,
safety_factor_scale="log",
),
test_gradients=st.just(False),
)
def test_fmod(dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
    # Make sure the values are not too close to zero
assume(not np.any(np.isclose(x[0], 0)))
assume(not np.any(np.isclose(x[1], 0)))
# jax raises inconsistent gradients for negative numbers in x1
if (np.any(x[0] < 0) or np.any(x[1] < 0)) and ivy.current_backend_str() == "jax":
test_flags.test_gradients = False
test_flags.as_variable = [test_flags.as_variable, False]
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x1=x[0],
x2=x[1],
)
# gcd
@handle_test(
fn_tree="functional.ivy.gcd",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
shared_dtype=False,
min_num_dims=1,
max_num_dims=3,
min_value=-100,
max_value=100,
allow_nan=False,
),
test_gradients=st.just(False),
)
def test_gcd(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# greater
@handle_test(
fn_tree="functional.ivy.greater",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2
),
test_gradients=st.just(False),
)
def test_greater(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
    # bfloat16 is not supported by numpy
assume("bfloat16" not in input_dtype)
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# greater_equal
@handle_test(
fn_tree="functional.ivy.greater_equal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2
),
test_gradients=st.just(False),
)
def test_greater_equal(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
# bfloat16 is not supported by numpy
assume("bfloat16" not in input_dtype)
# make sure they're not too close together
assume(not (np.any(np.isclose(x[0], x[1])) or np.any(np.isclose(x[1], x[0]))))
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# imag
@handle_test(
fn_tree="functional.ivy.imag",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_value=-5,
max_value=5,
max_dim_size=5,
max_num_dims=5,
min_dim_size=1,
min_num_dims=1,
allow_inf=False,
allow_nan=False,
),
test_gradients=st.just(False),
test_instance_method=st.just(False),
)
def test_imag(
*,
dtype_and_x,
test_flags,
backend_fw,
fn_name,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
val=x[0],
)
# isfinite
@handle_test(
fn_tree="functional.ivy.isfinite",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric")
),
test_gradients=st.just(False),
)
def test_isfinite(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
# isinf
@handle_test(
fn_tree="functional.ivy.isinf",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric")
),
detect_positive=st.booleans(),
detect_negative=st.booleans(),
test_gradients=st.just(False),
)
def test_isinf(
*,
dtype_and_x,
detect_positive,
detect_negative,
test_flags,
backend_fw,
fn_name,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
detect_positive=detect_positive,
detect_negative=detect_negative,
)
# isnan
@handle_test(
fn_tree="functional.ivy.isnan",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric")
),
test_gradients=st.just(False),
)
def test_isnan(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
# isreal
@handle_test(
fn_tree="functional.ivy.isreal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("real_and_complex")
),
test_gradients=st.just(False),
)
def test_isreal(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
# lcm
@handle_test(
fn_tree="functional.ivy.lcm",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=["int16", "int32", "int64"],
num_arrays=2,
shared_dtype=False,
min_num_dims=1,
max_num_dims=3,
min_value=-100,
max_value=100,
allow_nan=False,
),
test_gradients=st.just(False),
)
def test_lcm(dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x1=x[0],
x2=x[1],
)
# less
@handle_test(
fn_tree="functional.ivy.less",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
min_num_dims=1,
),
test_gradients=st.just(False),
)
def test_less(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
# bfloat16 is not supported by numpy
assume("bfloat16" not in input_dtype)
# make sure they're not too close together
assume(not (np.any(np.isclose(x[0], x[1])) or np.any(np.isclose(x[1], x[0]))))
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# less_equal
@handle_test(
fn_tree="functional.ivy.less_equal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2
),
test_gradients=st.just(False),
ground_truth_backend="jax",
)
def test_less_equal(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
# bfloat16 is not supported by numpy
assume("bfloat16" not in input_dtype)
# make sure they're not too close together
assume(not (np.any(np.isclose(x[0], x[1])) or np.any(np.isclose(x[1], x[0]))))
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# log
@handle_test(
fn_tree="functional.ivy.log",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
safety_factor_scale="log",
),
)
def test_log(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
# avoid logging values too close to zero
assume(not np.any(np.isclose(x[0], 0)))
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
# log10
@handle_test(
fn_tree="functional.ivy.log10",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
safety_factor_scale="log",
),
)
def test_log10(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
# avoid logging values too close to zero
assume(not np.any(np.isclose(x[0], 0)))
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x=x[0],
)
# log1p
@handle_test(
fn_tree="functional.ivy.log1p",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
small_abs_safety_factor=2,
large_abs_safety_factor=2.1,
safety_factor_scale="log",
),
)
def test_log1p(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
# avoid logging values too close to zero
assume(not np.any(np.isclose(x[0], 0)))
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
# log2
@handle_test(
fn_tree="functional.ivy.log2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
safety_factor_scale="log",
),
)
def test_log2(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
# avoid logging values too close to zero
assume(not np.any(np.isclose(x[0], 0)))
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
x=x[0],
)
# logaddexp
@handle_test(
fn_tree="functional.ivy.logaddexp",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
abs_smallest_val=0.137,
min_value=-80,
max_value=80,
),
)
def test_logaddexp(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
x1=x[0],
x2=x[1],
)
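# Illustrative sketch (hypothetical helper, not used by the tests): logaddexp
# evaluates log(exp(x1) + exp(x2)) without overflow by factoring out the larger
# argument: m + log1p(exp(-|x1 - x2|)) with m = max(x1, x2).
def _demo_logaddexp_stable(x1, x2):
    m = np.maximum(x1, x2)
    return m + np.log1p(np.exp(-np.abs(x1 - x2)))
# e.g. _demo_logaddexp_stable(1000.0, 1000.0) -> 1000.0 + log(2), no overflow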
# logaddexp2
@handle_test(
fn_tree="functional.ivy.logaddexp2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=["float32", "float64"],
num_arrays=2,
shared_dtype=True,
min_num_dims=1,
max_num_dims=3,
min_value=-100,
max_value=100,
allow_nan=False,
),
test_gradients=st.just(False),
)
def test_logaddexp2(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-02,
atol_=1e-02,
x1=x[0],
x2=x[1],
)
# logical_and
@handle_test(
fn_tree="functional.ivy.logical_and",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2
),
test_gradients=st.just(False),
)
def test_logical_and(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# logical_not
@handle_test(
fn_tree="functional.ivy.logical_not",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
test_gradients=st.just(False),
)
def test_logical_not(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
# logical_or
@handle_test(
fn_tree="functional.ivy.logical_or",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2
),
test_gradients=st.just(False),
)
def test_logical_or(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# logical_xor
@handle_test(
fn_tree="functional.ivy.logical_xor",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2
),
test_gradients=st.just(False),
)
def test_logical_xor(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# maximum
@handle_test(
fn_tree="functional.ivy.maximum",
dtype_and_x_and_use_where=min_max_helper(),
test_gradients=st.just(False),
ground_truth_backend="jax",
)
def test_maximum(
*, dtype_and_x_and_use_where, test_flags, backend_fw, fn_name, on_device
):
(input_dtype, x), use_where = dtype_and_x_and_use_where
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x1=x[0],
x2=x[1],
use_where=use_where,
)
# minimum
@handle_test(
fn_tree="functional.ivy.minimum",
dtype_and_x_and_use_where=min_max_helper(),
test_gradients=st.just(False),
ground_truth_backend="jax",
)
def test_minimum(
*, dtype_and_x_and_use_where, test_flags, backend_fw, fn_name, on_device
):
(input_dtype, x), use_where = dtype_and_x_and_use_where
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x1=x[0],
x2=x[1],
use_where=use_where,
)
# multiply
@handle_test(
fn_tree="functional.ivy.multiply",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2
),
ground_truth_backend="torch",
)
def test_multiply(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# nan_to_num
@handle_test(
fn_tree="functional.ivy.nan_to_num",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
max_num_dims=3,
min_value=-100,
max_value=100,
allow_nan=True,
allow_inf=True,
),
copy=st.booleans(),
nan=st.floats(min_value=0.0, max_value=100),
posinf=st.floats(min_value=5e100, max_value=5e100),
neginf=st.floats(min_value=-5e100, max_value=-5e100),
test_gradients=st.just(False),
test_with_copy=st.just(True),
)
def test_nan_to_num(
*,
dtype_and_x,
copy,
nan,
posinf,
neginf,
test_flags,
backend_fw,
fn_name,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
copy=copy,
nan=nan,
posinf=posinf,
neginf=neginf,
)
# negative
@handle_test(
fn_tree="functional.ivy.negative",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric")
),
)
def test_negative(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
# not_equal
@handle_test(
fn_tree="functional.ivy.not_equal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", full=True), num_arrays=2
),
test_gradients=st.just(False),
)
def test_not_equal(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# positive
@handle_test(
fn_tree="functional.ivy.positive",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric")
),
)
def test_positive(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
# pow
@handle_test(
fn_tree="functional.ivy.pow",
dtype_and_x=pow_helper(),
test_gradients=st.just(False),
ground_truth_backend="numpy",
)
def test_pow(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
try:
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x1=x[0],
x2=x[1],
)
except Exception as e:
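        # treat overflowing draws as invalid examples rather than failures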
if any(
error_string in str(e)
for error_string in ["overflow", "too large to convert to"]
):
assume(False)
else:
raise
@handle_test(
fn_tree="functional.ivy.rad2deg",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
)
def test_rad2deg(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
rtol_=1e-2,
atol_=1e-2,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
# real
@handle_test(
fn_tree="functional.ivy.real",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("real_and_complex")
),
)
def test_real(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
# reciprocal
@handle_test(
fn_tree="functional.ivy.reciprocal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
small_abs_safety_factor=4,
large_abs_safety_factor=4,
safety_factor_scale="log",
num_arrays=1,
),
)
def test_reciprocal(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
x=x[0],
)
# remainder
@handle_test(
fn_tree="functional.ivy.remainder",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=6,
small_abs_safety_factor=6,
safety_factor_scale="log",
),
modulus=st.booleans(),
)
def test_remainder(*, dtype_and_x, modulus, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
    # Make sure the values are not too close to zero
assume(not np.any(np.isclose(x[0], 0)))
assume(not np.any(np.isclose(x[1], 0)))
# jax raises inconsistent gradients for negative numbers in x1
if (np.any(x[0] < 0) or np.any(x[1] < 0)) and ivy.current_backend_str() == "jax":
test_flags.test_gradients = False
test_flags.as_variable = [test_flags.as_variable, False]
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x1=x[0],
x2=x[1],
rtol_=1e-2,
atol_=1e-2,
modulus=modulus,
)
# round
@handle_test(
fn_tree="functional.ivy.round",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric")
),
decimals=st.integers(min_value=0, max_value=5),
ground_truth_backend="numpy",
)
def test_round(*, dtype_and_x, decimals, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
decimals=decimals,
)
# sign
@handle_test(
fn_tree="functional.ivy.sign",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
large_abs_safety_factor=5,
small_abs_safety_factor=5,
safety_factor_scale="log",
),
np_variant=st.booleans(),
)
def test_sign(*, dtype_and_x, np_variant, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[0], 0)))
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
np_variant=np_variant,
)
# sin
@handle_test(
fn_tree="functional.ivy.sin",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex")
),
)
def test_sin(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
if "paddle" in backend_fw and input_dtype[0] == "float16":
assume(not test_flags.test_gradients)
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
# sinh
@handle_test(
fn_tree="functional.ivy.sinh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex")
),
)
def test_sinh(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
# sqrt
@handle_test(
fn_tree="functional.ivy.sqrt",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
allow_inf=False,
        # Safety factor accounts for complex dtypes, where taking the square
        # root involves taking the absolute value first
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
).filter(lambda x: x[0][0] not in ["bfloat16"]),
)
def test_sqrt(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x=x[0],
)
# square
@handle_test(
fn_tree="functional.ivy.square",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
large_abs_safety_factor=2,
safety_factor_scale="log",
),
)
def test_square(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
# subtract
@handle_test(
fn_tree="functional.ivy.subtract",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
),
alpha=st.integers(min_value=1, max_value=5),
)
def test_subtract(*, dtype_and_x, alpha, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
x1=x[0],
x2=x[1],
alpha=alpha,
)
# tan
@handle_test(
fn_tree="functional.ivy.tan",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex")
),
)
def test_tan(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
x=x[0],
)
# tanh
@handle_test(
fn_tree="functional.ivy.tanh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex")
),
complex_mode=st.sampled_from(["jax", "split", "magnitude"]),
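    # complex_mode selects how complex inputs are handled: mirror jax's native
    # behaviour, apply the function to real/imaginary parts separately
    # ("split"), or act on the magnitude (a gloss on ivy's documented options)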
)
def test_tanh(*, dtype_and_x, complex_mode, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
complex_mode=complex_mode,
atol_=1e-02, # for `test_flags.test_gradients and 'bfloat16' in input_dtype`
)
@handle_test(
fn_tree="functional.ivy.trapz",
dtype_values_axis=helpers.dtype_values_axis(
available_dtypes=st.shared(helpers.get_dtypes("float"), key="trapz_dtype"),
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
allow_neg_axes=True,
valid_axis=True,
force_int_axis=True,
),
rand_either=_either_x_dx(),
test_gradients=st.just(False),
)
def test_trapz(
dtype_values_axis, rand_either, test_flags, backend_fw, fn_name, on_device
):
input_dtype, y, axis = dtype_values_axis
rand, either_x_dx = rand_either
if rand == 0:
        dtype_x, x = either_x_dx
        # dtype_and_values returns lists; take the single drawn dtype/array
        x = np.asarray(x[0], dtype=dtype_x[0])
dx = None
else:
x = None
dx = either_x_dx
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
rtol_=1e-1,
atol_=1e-1,
y=np.asarray(y[0], dtype=input_dtype[0]),
x=x,
dx=dx,
axis=axis,
)
# trunc
@handle_test(
fn_tree="functional.ivy.trunc",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric")
),
)
def test_trunc(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
)
# trunc_divide
@handle_test(
fn_tree="functional.ivy.trunc_divide",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
),
)
def test_trunc_divide(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
    # prevent the divisor from being too close to zero
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x1=x[0],
x2=x[1],
)
| ivy/ivy_tests/test_ivy/test_functional/test_core/test_elementwise.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_core/test_elementwise.py",
"repo_id": "ivy",
"token_count": 27905
} | 63 |
# global
import os
import queue
import pytest
import random
import numpy as np
import multiprocessing
import pickle
# local
import ivy
from ivy.functional.ivy.gradients import _variable
from ivy.data_classes.container import Container
from ivy.utils.exceptions import IvyException
def test_container_all_false(on_device):
assert Container({"a": False, "b": {"c": [], "d": 0}}).cont_all_false()
assert not Container({"a": False, "b": {"c": [1], "d": 0}}).cont_all_false()
# noinspection PyBroadException
try:
assert Container(
{"a": ivy.array([1], device=on_device), "b": {"c": [1], "d": True}}
).cont_all_false(assert_is_bool=True)
error_raised = False
except IvyException:
error_raised = True
assert error_raised
@pytest.mark.parametrize("include_empty", [True, False])
def test_container_all_key_chains(include_empty, on_device):
a_val = Container() if include_empty else ivy.array([1], device=on_device)
bc_val = Container() if include_empty else ivy.array([2], device=on_device)
bd_val = Container() if include_empty else ivy.array([3], device=on_device)
dict_in = {"a": a_val, "b": {"c": bc_val, "d": bd_val}}
container = Container(dict_in)
kcs = container.cont_all_key_chains(include_empty)
assert kcs[0] == "a"
assert kcs[1] == "b/c"
assert kcs[2] == "b/d"
def test_container_all_true(on_device):
assert not Container(
{"a": ivy.array([1], device=on_device), "b": {"c": [], "d": True}}
).cont_all_true()
assert Container(
{"a": ivy.array([1], device=on_device), "b": {"c": [1], "d": True}}
).cont_all_true()
# noinspection PyBroadException
try:
assert Container(
{"a": ivy.array([1], device=on_device), "b": {"c": [1], "d": True}}
).cont_all_true(assert_is_bool=True)
error_raised = False
except IvyException:
error_raised = True
assert error_raised
def test_container_as_bools(on_device):
dict_in = {"a": ivy.array([1], device=on_device), "b": {"c": [], "d": True}}
container = Container(dict_in)
container_bools = container.cont_as_bools()
assert container_bools["a"] is True
assert container_bools.a is True
assert container_bools["b"]["c"] is False
assert container_bools.b.c is False
assert container_bools["b"]["d"] is True
assert container_bools.b.d is True
def test_container_assert_contains(on_device):
arr0 = ivy.array([0.0], device=on_device)
arr1 = ivy.array([1.0], device=on_device)
arr2 = ivy.array([2.0], device=on_device)
sub_cont = Container({"c": arr1, "d": arr2})
container = Container({"a": arr0, "b": sub_cont})
# keys
assert "a" in container
assert "b" in container
assert "c" not in container
assert "b/c" in container
assert "d" not in container
assert "b/d" in container
# sub-container
container.cont_assert_contains_sub_container(container)
container.cont_assert_contains_sub_container(sub_cont)
assert sub_cont in container
# partial sub-container
partial_sub_cont = Container({"b": {"d": arr2}})
container.cont_assert_contains_sub_container(container, partial=True)
container.cont_assert_contains_sub_container(partial_sub_cont, partial=True)
try:
partial_sub_cont.cont_assert_contains_sub_container(container, partial=True)
error_caught = False
except IvyException:
error_caught = True
assert error_caught
# sub-structure
sub_struc = Container(
{
"c": ivy.array([3.0], device=on_device),
"d": ivy.array([4.0], device=on_device),
}
)
try:
        container.cont_assert_contains_sub_container(sub_struc)
error_caught = False
except IvyException:
error_caught = True
assert error_caught
assert sub_struc not in container
container.cont_assert_contains_sub_structure(sub_struc)
container.cont_assert_contains_sub_structure(container)
# partial sub-structure
partial_sub_struc = Container({"b": {"d": ivy.array([4.0], device=on_device)}})
container.cont_assert_contains_sub_structure(container, partial=True)
container.cont_assert_contains_sub_structure(partial_sub_struc, partial=True)
try:
partial_sub_struc.cont_assert_contains_sub_structure(container, partial=True)
error_caught = False
except IvyException:
error_caught = True
assert error_caught
def test_container_assert_identical(on_device):
# without key_chains specification
arr1 = ivy.array([1], device=on_device)
arr2 = ivy.array([2], device=on_device)
arr3 = ivy.array([3], device=on_device)
container0 = Container({"a": arr1, "b": {"c": arr2, "d": arr3}})
container1 = Container({"a": arr1, "b": {"c": arr2, "d": arr3}})
container2 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container3 = Container({"b": {"d": arr3}})
container4 = Container({"d": arr3})
# the same
ivy.Container.cont_assert_identical([container0, container1])
ivy.Container.cont_assert_identical([container1, container0])
# not the same
try:
ivy.Container.cont_assert_identical([container0, container2])
error_caught = False
except IvyException:
error_caught = True
assert error_caught
try:
ivy.Container.cont_assert_identical([container1, container2])
error_caught = False
except IvyException:
error_caught = True
assert error_caught
# partial
ivy.Container.cont_assert_identical([container0, container3], partial=True)
ivy.Container.cont_assert_identical([container3, container0], partial=True)
try:
ivy.Container.cont_assert_identical([container4, container0], partial=True)
error_caught = False
except IvyException:
error_caught = True
assert error_caught
def test_container_assert_identical_structure(on_device):
# without key_chains specification
container0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container1 = Container(
{
"a": ivy.array([3], device=on_device),
"b": {
"c": ivy.array([4], device=on_device),
"d": ivy.array([5], device=on_device),
},
}
)
container2 = Container(
{
"a": ivy.array([3], device=on_device),
"b": {
"c": ivy.array([4], device=on_device),
"d": ivy.array([5], device=on_device),
"e": ivy.array([6], device=on_device),
},
}
)
container3 = Container(
{
"a": ivy.array([3], device=on_device),
"b": {
"c": ivy.array([4], device=on_device),
"d": ivy.array([5], device=on_device),
},
"e": ivy.array([6], device=on_device),
}
)
container4 = Container({"b": {"d": ivy.array([4], device=on_device)}})
container5 = Container({"d": ivy.array([4], device=on_device)})
# with identical
ivy.Container.cont_assert_identical_structure([container0, container1])
ivy.Container.cont_assert_identical_structure([container1, container0])
ivy.Container.cont_assert_identical_structure([container1, container0, container1])
# without identical
try:
ivy.Container.cont_assert_identical_structure(
[container0, container1, container2, container3]
)
error_caught = False
except IvyException:
        error_caught = True
    assert error_caught
    # partial
try:
ivy.Container.cont_assert_identical_structure(
[container0, container1, container2, container3, container4, container5],
partial=True,
)
error_caught = False
except IvyException:
error_caught = True
assert error_caught
try:
ivy.Container.cont_assert_identical_structure(
[container0, container5], partial=True
)
error_caught = False
except IvyException:
error_caught = True
assert error_caught
def test_container_at_key_chain(on_device):
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
container = Container(dict_in)
# explicit function call
sub_container = container.cont_at_key_chain("b")
assert np.allclose(ivy.to_numpy(sub_container["c"]), np.array([2]))
sub_container = container.cont_at_key_chain("b/c")
assert np.allclose(ivy.to_numpy(sub_container), np.array([2]))
# overridden built-in function call
sub_container = container["b"]
assert np.allclose(ivy.to_numpy(sub_container["c"]), np.array([2]))
sub_container = container["b/c"]
assert np.allclose(ivy.to_numpy(sub_container), np.array([2]))
def test_container_at_key_chains(on_device):
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
container = Container(dict_in)
target_cont = Container({"a": True, "b": {"c": True}})
new_container = container.cont_at_key_chains(target_cont)
assert np.allclose(ivy.to_numpy(new_container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([2]))
assert "d" not in new_container["b"]
new_container = container.cont_at_key_chains(["b/c", "b/d"])
assert "a" not in new_container
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(new_container["b"]["d"]), np.array([3]))
new_container = container.cont_at_key_chains("b/c")
assert "a" not in new_container
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([2]))
assert "d" not in new_container["b"]
def test_container_at_keys(on_device):
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
container = Container(dict_in)
new_container = container.cont_at_keys(["a", "c"])
assert np.allclose(ivy.to_numpy(new_container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([2]))
assert "d" not in new_container["b"]
new_container = container.cont_at_keys("c")
assert "a" not in new_container
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([2]))
assert "d" not in new_container["b"]
new_container = container.cont_at_keys(["b"])
assert "a" not in new_container
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(new_container["b"]["d"]), np.array([3]))
def test_container_combine(on_device):
container_0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container_1 = Container(
{
"a": ivy.array([4], device=on_device),
"b": {
"c": ivy.array([5], device=on_device),
"e": ivy.array([6], device=on_device),
},
}
)
container_comb = ivy.Container.cont_combine(container_0, container_1)
assert np.equal(ivy.to_numpy(container_comb.a), np.array([4]))
assert np.equal(ivy.to_numpy(container_comb.b.c), np.array([5]))
assert np.equal(ivy.to_numpy(container_comb.b.d), np.array([3]))
assert np.equal(ivy.to_numpy(container_comb.b.e), np.array([6]))
def test_container_common_key_chains(on_device):
arr1 = ivy.array([1], device=on_device)
arr2 = ivy.array([2], device=on_device)
arr3 = ivy.array([3], device=on_device)
cont0 = Container({"a": arr1, "b": {"c": arr2, "d": arr3}})
cont1 = Container({"b": {"c": arr2, "d": arr3, "e": arr1}})
cont2 = Container({"a": arr1, "b": {"d": arr3, "e": arr1}})
# 0
common_kcs = Container.cont_common_key_chains([cont0])
assert len(common_kcs) == 3
assert "a" in common_kcs
assert "b/c" in common_kcs
assert "b/d" in common_kcs
# 0-1
common_kcs = Container.cont_common_key_chains([cont0, cont1])
assert len(common_kcs) == 2
assert "b/c" in common_kcs
assert "b/d" in common_kcs
# 0-2
common_kcs = Container.cont_common_key_chains([cont0, cont2])
assert len(common_kcs) == 2
assert "a" in common_kcs
assert "b/d" in common_kcs
# 1-2
common_kcs = Container.cont_common_key_chains([cont1, cont2])
assert len(common_kcs) == 2
assert "b/d" in common_kcs
assert "b/e" in common_kcs
# all
common_kcs = Container.cont_common_key_chains([cont0, cont1, cont2])
assert len(common_kcs) == 1
assert "b/d" in common_kcs
def test_container_cont_inplace_update(on_device):
container0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([1], device=on_device),
"d": ivy.array([2], device=on_device),
},
}
)
id0 = id(container0)
container1 = Container(
{
"a": ivy.array([0], device=on_device),
"b": {
"c": ivy.array([0], device=on_device),
"d": ivy.array([0], device=on_device),
},
}
)
id1 = id(container1)
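    # cont_inplace_update should overwrite container0's leaves with
    # container1's values while both container objects keep their identities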
assert ivy.Container.cont_all_false(container0.all_equal(container1))
    container0.cont_inplace_update(container1)
assert id0 == id(container0)
assert id1 == id(container1)
assert ivy.Container.cont_all_true(container0.all_equal(container1))
def test_container_contains(on_device):
arr0 = ivy.array([0.0], device=on_device)
arr1 = ivy.array([1.0], device=on_device)
arr2 = ivy.array([2.0], device=on_device)
sub_cont = Container({"c": arr1, "d": arr2})
container = Container({"a": arr0, "b": sub_cont})
# keys
assert "a" in container
assert "b" in container
assert "c" not in container
assert "b/c" in container
assert "d" not in container
assert "b/d" in container
# sub-container
assert container.cont_contains_sub_container(container)
assert container.cont_contains_sub_container(sub_cont)
assert sub_cont in container
# partial sub-container
partial_sub_cont = Container({"b": {"d": arr2}})
assert container.cont_contains_sub_container(container, partial=True)
assert container.cont_contains_sub_container(partial_sub_cont, partial=True)
assert not partial_sub_cont.cont_contains_sub_container(container, partial=True)
# sub-structure
sub_struc = Container(
{
"c": ivy.array([3.0], device=on_device),
"d": ivy.array([4.0], device=on_device),
}
)
assert not container.cont_contains_sub_container(sub_struc)
assert sub_struc not in container
assert container.cont_contains_sub_structure(sub_struc)
assert container.cont_contains_sub_structure(container)
# partial sub-structure
partial_sub_struc = Container({"b": {"d": ivy.array([4.0], device=on_device)}})
assert container.cont_contains_sub_structure(container, partial=True)
assert container.cont_contains_sub_structure(partial_sub_struc, partial=True)
assert not partial_sub_struc.cont_contains_sub_structure(container, partial=True)
def test_container_copy(on_device):
dict_in = {
"a": ivy.array([0.0], device=on_device),
"b": {
"c": ivy.array([1.0], device=on_device),
"d": ivy.array([2.0], device=on_device),
},
}
cont = Container(dict_in)
    cont_copied = cont.cont_copy()
    assert np.allclose(ivy.to_numpy(cont.a), ivy.to_numpy(cont_copied.a))
    assert np.allclose(ivy.to_numpy(cont.b.c), ivy.to_numpy(cont_copied.b.c))
    assert np.allclose(ivy.to_numpy(cont.b.d), ivy.to_numpy(cont_copied.b.d))
    assert id(cont) != id(cont_copied)
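    # cont_copy is shallow: a new container object is created, but the leaf
    # arrays are shared with the original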
    assert id(cont.a) == id(cont_copied.a)
    assert id(cont.b.c) == id(cont_copied.b.c)
    assert id(cont.b.d) == id(cont_copied.b.d)
def test_container_create_if_absent(on_device):
dict_in = {
"a": ivy.array([[[1.0], [2.0], [3.0]]], device=on_device),
"b": {
"c": ivy.array([[[2.0], [4.0], [6.0]]], device=on_device),
"d": ivy.array([[[3.0], [6.0], [9.0]]], device=on_device),
},
}
# depth 1
container = Container(dict_in)
container.cont_create_if_absent("a", None, True)
assert np.allclose(ivy.to_numpy(container.a), np.array([[[1.0], [2.0], [3.0]]]))
container.cont_create_if_absent("e", ivy.array([[[4.0], [8.0], [12.0]]]), True)
assert np.allclose(ivy.to_numpy(container.e), np.array([[[4.0], [8.0], [12.0]]]))
# depth 2
container.cont_create_if_absent("f/g", np.array([[[5.0], [10.0], [15.0]]]), True)
assert np.allclose(ivy.to_numpy(container.f.g), np.array([[[5.0], [10.0], [15.0]]]))
@pytest.mark.parametrize("inplace", [True, False])
def test_container_cutoff_at_depth(inplace, on_device):
# values
a_val = ivy.array([1], device=on_device)
bcde_val = ivy.array([2], device=on_device)
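    # cont_cutoff_at_depth(d) truncates the tree below depth d, leaving empty
    # containers where the removed sub-trees were (hence the falsy checks below)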
# depth 1
cont = Container({"a": a_val, "b": {"c": {"d": {"e": bcde_val}}}})
cont_cutoff = cont.cont_cutoff_at_depth(1, inplace=inplace)
if inplace:
cont_cutoff = cont
assert np.allclose(ivy.to_numpy(cont_cutoff.a), ivy.to_numpy(a_val))
assert not cont_cutoff.b
# depth 2
cont = Container({"a": a_val, "b": {"c": {"d": {"e": bcde_val}}}})
cont_cutoff = cont.cont_cutoff_at_depth(2, inplace=inplace)
if inplace:
cont_cutoff = cont
assert np.allclose(ivy.to_numpy(cont_cutoff.a), ivy.to_numpy(a_val))
assert not cont_cutoff.b.c
# depth 3
cont = Container({"a": a_val, "b": {"c": {"d": {"e": bcde_val}}}})
cont_cutoff = cont.cont_cutoff_at_depth(3, inplace=inplace)
if inplace:
cont_cutoff = cont
assert np.allclose(ivy.to_numpy(cont_cutoff.a), ivy.to_numpy(a_val))
assert not cont_cutoff.b.c.d
# depth 4
cont = Container({"a": a_val, "b": {"c": {"d": {"e": bcde_val}}}})
cont_cutoff = cont.cont_cutoff_at_depth(4, inplace=inplace)
if inplace:
cont_cutoff = cont
assert np.allclose(ivy.to_numpy(cont_cutoff.a), ivy.to_numpy(a_val))
assert np.allclose(ivy.to_numpy(cont_cutoff.b.c.d.e), ivy.to_numpy(bcde_val))
@pytest.mark.parametrize("inplace", [True, False])
def test_container_cutoff_at_height(inplace, on_device):
# values
d_val = ivy.array([2], device=on_device)
e_val = ivy.array([3], device=on_device)
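    # height is measured upwards from the leaves, so cutting at height 0 keeps
    # the full tree, while larger heights strip levels from the bottom up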
# height 0
cont = Container({"a": {"c": {"d": d_val}}, "b": {"c": {"d": {"e": e_val}}}})
cont_cutoff = cont.cont_cutoff_at_height(0, inplace=inplace)
if inplace:
cont_cutoff = cont
assert np.allclose(ivy.to_numpy(cont_cutoff.a.c.d), ivy.to_numpy(d_val))
assert np.allclose(ivy.to_numpy(cont_cutoff.b.c.d.e), ivy.to_numpy(e_val))
# height 1
cont = Container({"a": {"c": {"d": d_val}}, "b": {"c": {"d": {"e": e_val}}}})
cont_cutoff = cont.cont_cutoff_at_height(1, inplace=inplace)
if inplace:
cont_cutoff = cont
assert not cont_cutoff.a.c
assert not cont_cutoff.b.c.d
# height 2
cont = Container({"a": {"c": {"d": d_val}}, "b": {"c": {"d": {"e": e_val}}}})
cont_cutoff = cont.cont_cutoff_at_height(2, inplace=inplace)
if inplace:
cont_cutoff = cont
assert not cont_cutoff.a
assert not cont_cutoff.b.c
# height 3
cont = Container({"a": {"c": {"d": d_val}}, "b": {"c": {"d": {"e": e_val}}}})
cont_cutoff = cont.cont_cutoff_at_height(3, inplace=inplace)
if inplace:
cont_cutoff = cont
assert not cont_cutoff.a
assert not cont_cutoff.b
# height 4
cont = Container({"a": {"c": {"d": d_val}}, "b": {"c": {"d": {"e": e_val}}}})
cont_cutoff = cont.cont_cutoff_at_height(4, inplace=inplace)
if inplace:
cont_cutoff = cont
assert not cont_cutoff
def test_container_deep_copy(on_device):
dict_in = {
"a": ivy.array([0.0], device=on_device),
"b": {
"c": ivy.array([1.0], device=on_device),
"d": ivy.array([2.0], device=on_device),
},
}
cont = Container(dict_in)
cont_deepcopy = cont.cont_deep_copy()
assert np.allclose(ivy.to_numpy(cont.a), ivy.to_numpy(cont_deepcopy.a))
assert np.allclose(ivy.to_numpy(cont.b.c), ivy.to_numpy(cont_deepcopy.b.c))
assert np.allclose(ivy.to_numpy(cont.b.d), ivy.to_numpy(cont_deepcopy.b.d))
assert id(cont.a) != id(cont_deepcopy.a)
assert id(cont.b.c) != id(cont_deepcopy.b.c)
assert id(cont.b.d) != id(cont_deepcopy.b.d)
def test_container_depth(on_device):
cont_depth1 = Container(
{"a": ivy.array([1], device=on_device), "b": ivy.array([2], device=on_device)}
)
assert cont_depth1.cont_max_depth == 1
cont_depth2 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
assert cont_depth2.cont_max_depth == 2
cont_depth3 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": {"d": ivy.array([2], device=on_device)},
"e": ivy.array([3], device=on_device),
},
}
)
assert cont_depth3.cont_max_depth == 3
cont_depth4 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {"c": {"d": {"e": ivy.array([2], device=on_device)}}},
}
)
assert cont_depth4.cont_max_depth == 4
def test_container_dev_str(on_device):
dict_in = {
"a": ivy.array([[[1.0], [2.0], [3.0]]], device=on_device),
"b": {
"c": ivy.array([[[2.0], [4.0], [6.0]]], device=on_device),
"d": ivy.array([[[3.0], [6.0], [9.0]]], device=on_device),
},
}
container = Container(dict_in)
assert container.cont_dev_str == on_device
def test_container_diff(on_device):
# all different arrays
container_0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container_1 = Container(
{
"a": ivy.array([4], device=on_device),
"b": {
"c": ivy.array([5], device=on_device),
"d": ivy.array([6], device=on_device),
},
}
)
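    # cont_diff places differing leaves under nested "diff_0"/"diff_1" keys;
    # mode="diff_only" keeps only the differing entries and mode="same_only"
    # keeps only the matching ones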
container_diff = ivy.Container.cont_diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.a.diff_1), np.array([4]))
assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([5]))
assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3]))
assert np.equal(ivy.to_numpy(container_diff.b.d.diff_1), np.array([6]))
container_diff_diff_only = ivy.Container.cont_diff(
container_0, container_1, mode="diff_only"
)
assert container_diff_diff_only.cont_to_dict() == container_diff.cont_to_dict()
container_diff_same_only = ivy.Container.cont_diff(
container_0, container_1, mode="same_only"
)
assert container_diff_same_only.cont_to_dict() == {}
# some different arrays
container_0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container_1 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([5], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container_diff = ivy.Container.cont_diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([5]))
assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3]))
container_diff_diff_only = ivy.Container.cont_diff(
container_0, container_1, mode="diff_only"
)
assert "a" not in container_diff_diff_only
assert "b" in container_diff_diff_only
assert "c" in container_diff_diff_only["b"]
assert "d" not in container_diff_diff_only["b"]
container_diff_same_only = ivy.Container.cont_diff(
container_0, container_1, mode="same_only"
)
assert "a" in container_diff_same_only
assert "b" in container_diff_same_only
assert "c" not in container_diff_same_only["b"]
assert "d" in container_diff_same_only["b"]
# all different keys
container_0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container_1 = Container(
{
"e": ivy.array([1], device=on_device),
"f": {
"g": ivy.array([2], device=on_device),
"h": ivy.array([3], device=on_device),
},
}
)
container_diff = ivy.Container.cont_diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.b.diff_0.c), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.diff_0.d), np.array([3]))
assert np.equal(ivy.to_numpy(container_diff.e.diff_1), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.f.diff_1.g), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.f.diff_1.h), np.array([3]))
container_diff_diff_only = ivy.Container.cont_diff(
container_0, container_1, mode="diff_only"
)
assert container_diff_diff_only.cont_to_dict() == container_diff.cont_to_dict()
container_diff_same_only = ivy.Container.cont_diff(
container_0, container_1, mode="same_only"
)
assert container_diff_same_only.cont_to_dict() == {}
# some different keys
container_0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container_1 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"e": ivy.array([3], device=on_device),
},
}
)
container_diff = ivy.Container.cont_diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3]))
assert np.equal(ivy.to_numpy(container_diff.b.e.diff_1), np.array([3]))
container_diff_diff_only = ivy.Container.cont_diff(
container_0, container_1, mode="diff_only"
)
assert "a" not in container_diff_diff_only
assert "b" in container_diff_diff_only
assert "c" not in container_diff_diff_only["b"]
assert "d" in container_diff_diff_only["b"]
assert "e" in container_diff_diff_only["b"]
container_diff_same_only = ivy.Container.cont_diff(
container_0, container_1, mode="same_only"
)
assert "a" in container_diff_same_only
assert "b" in container_diff_same_only
assert "c" in container_diff_same_only["b"]
assert "d" not in container_diff_same_only["b"]
assert "e" not in container_diff_same_only["b"]
# same containers
container_0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container_1 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container_diff = ivy.Container.cont_diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3]))
container_diff_diff_only = ivy.Container.cont_diff(
container_0, container_1, mode="diff_only"
)
assert container_diff_diff_only.cont_to_dict() == {}
container_diff_same_only = ivy.Container.cont_diff(
container_0, container_1, mode="same_only"
)
assert container_diff_same_only.cont_to_dict() == container_diff.cont_to_dict()
# all different strings
container_0 = Container({"a": "1", "b": {"c": "2", "d": "3"}})
container_1 = Container({"a": "4", "b": {"c": "5", "d": "6"}})
container_diff = ivy.Container.cont_diff(container_0, container_1)
assert container_diff.a.diff_0 == "1"
assert container_diff.a.diff_1 == "4"
assert container_diff.b.c.diff_0 == "2"
assert container_diff.b.c.diff_1 == "5"
assert container_diff.b.d.diff_0 == "3"
assert container_diff.b.d.diff_1 == "6"
container_diff_diff_only = ivy.Container.cont_diff(
container_0, container_1, mode="diff_only"
)
assert container_diff_diff_only.cont_to_dict() == container_diff.cont_to_dict()
container_diff_same_only = ivy.Container.cont_diff(
container_0, container_1, mode="same_only"
)
assert container_diff_same_only.cont_to_dict() == {}
def test_container_duplicate_array_keychains(on_device):
arr1 = ivy.array([1], device=on_device)
arr2 = ivy.array([2], device=on_device)
container0 = Container({"a": arr1, "b": {"c": arr1, "d": arr2}})
container1 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([1], device=on_device),
"d": ivy.array([2], device=on_device),
},
}
)
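    # container0 re-uses arr1 at both "a" and "b/c", so those key chains are
    # reported as duplicates; container1 holds distinct arrays, so none are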
res = ivy.Container.cont_duplicate_array_keychains(container0)
assert res == (("a", "b/c"),)
res = ivy.Container.cont_duplicate_array_keychains(container1)
assert res == ()
def test_container_find_sub_container(on_device):
arr1 = ivy.array([1], device=on_device)
arr2 = ivy.array([2], device=on_device)
arr3 = ivy.array([3], device=on_device)
dict_in = {"a": arr1, "b": {"c": arr2, "d": arr3}}
top_cont = Container(dict_in)
# full
sub_cont = Container(dict_in["b"])
assert sub_cont in top_cont
found_kc = top_cont.cont_find_sub_container(sub_cont)
assert found_kc == "b"
found_kc = top_cont.cont_find_sub_container(top_cont)
assert found_kc == ""
# partial
partial_sub_cont = Container({"d": arr3})
found_kc = top_cont.cont_find_sub_container(partial_sub_cont, partial=True)
assert found_kc == "b"
assert partial_sub_cont.cont_find_sub_container(top_cont, partial=True) is False
partial_sub_cont = Container({"b": {"d": arr3}})
found_kc = top_cont.cont_find_sub_container(partial_sub_cont, partial=True)
assert found_kc == ""
assert partial_sub_cont.cont_find_sub_container(top_cont, partial=True) is False
def test_container_find_sub_structure(on_device):
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
top_cont = Container(dict_in)
# full
sub_cont = Container(
{"c": ivy.array([4], device=on_device), "d": ivy.array([5], device=on_device)}
)
assert not top_cont.cont_find_sub_container(sub_cont)
found_kc = top_cont.cont_find_sub_structure(sub_cont)
assert found_kc == "b"
found_kc = top_cont.cont_find_sub_structure(top_cont)
assert found_kc == ""
# partial
partial_sub_cont = Container({"d": ivy.array([5], device=on_device)})
found_kc = top_cont.cont_find_sub_structure(partial_sub_cont, partial=True)
assert found_kc == "b"
partial_sub_cont = Container({"b": {"d": ivy.array([5], device=on_device)}})
found_kc = top_cont.cont_find_sub_structure(partial_sub_cont, partial=True)
assert found_kc == ""
def test_container_flatten_key_chains(on_device):
container = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": {"d": ivy.array([2], device=on_device)},
"e": {"f": {"g": ivy.array([3], device=on_device)}},
},
}
)
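    # flattening joins key chains with "__"; above_height keeps the bottom
    # `height` levels nested, while below_depth keeps the top `depth` levels
    # nested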
# full
container_flat = container.cont_flatten_key_chains()
assert np.allclose(ivy.to_numpy(container_flat["a"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat["b__c__d"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat.b__c__d), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat["b__e__f__g"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_flat.b__e__f__g), np.array([[3]]))
# above height 1
container_flat = container.cont_flatten_key_chains(above_height=1)
assert np.allclose(ivy.to_numpy(container_flat["a"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat["b__c"]["d"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat.b__c.d), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat["b__e__f"]["g"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_flat.b__e__f.g), np.array([[3]]))
# below depth 1
container_flat = container.cont_flatten_key_chains(below_depth=1)
assert np.allclose(ivy.to_numpy(container_flat["a"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat["b"]["c__d"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat.b.c__d), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat["b"]["e__f__g"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_flat.b.e__f__g), np.array([[3]]))
# above height 1, below depth 1
container_flat = container.cont_flatten_key_chains(above_height=1, below_depth=1)
assert np.allclose(ivy.to_numpy(container_flat["a"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat["b"]["c"]["d"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat.b.c.d), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat["b"]["e__f"]["g"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_flat.b.e__f.g), np.array([[3]]))
def test_container_format_key_chains(on_device):
dict_in = {
"_a": ivy.array([1], device=on_device),
"b ": {
"c": ivy.array([2], device=on_device),
"d-": ivy.array([3], device=on_device),
},
}
cont = Container(dict_in)
cont_formatted = cont.cont_format_key_chains(
lambda s: s.replace("_", "").replace(" ", "").replace("-", "")
)
assert np.allclose(ivy.to_numpy(cont_formatted["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(cont_formatted.a), np.array([1]))
assert np.allclose(ivy.to_numpy(cont_formatted["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(cont_formatted.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(cont_formatted["b"]["d"]), np.array([3]))
assert np.allclose(ivy.to_numpy(cont_formatted.b.d), np.array([3]))
def test_container_from_dict(on_device):
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
container = Container(dict_in)
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([3]))
def test_container_from_dict_w_cont_types(on_device):
# ToDo: add tests for backends other than jax
    if ivy.current_backend_str() != "jax":
pytest.skip()
from haiku._src.data_structures import FlatMapping
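    # FlatMapping is haiku's immutable mapping type; Container should iterate
    # it just like a plain dict when building the nest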
dict_in = {
"a": ivy.array([1], device=on_device),
"b": FlatMapping(
{
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
}
),
}
container = Container(dict_in)
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([3]))
def test_container_from_flat_list(on_device):
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
container = Container(dict_in)
flat_list = [4, 5, 6]
container = container.cont_from_flat_list(flat_list)
assert np.allclose(ivy.to_numpy(container["a"]), np.array([4]))
assert np.allclose(ivy.to_numpy(container.a), np.array([4]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([5]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([5]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([6]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([6]))
def test_container_from_kwargs(on_device):
container = Container(
a=ivy.array([1], device=on_device),
b={
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
)
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([3]))
def test_container_from_list(on_device):
list_in = [
ivy.array([1], device=on_device),
[ivy.array([2], device=on_device), ivy.array([3], device=on_device)],
]
container = Container(list_in, types_to_iteratively_nest=[list])
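    # lists are only nested when passed via types_to_iteratively_nest; the
    # elements then become sub-containers keyed "it_0", "it_1", ...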
assert np.allclose(ivy.to_numpy(container["it_0"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container.it_0), np.array([1]))
assert np.allclose(ivy.to_numpy(container["it_1"]["it_0"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.it_1.it_0), np.array([2]))
assert np.allclose(ivy.to_numpy(container["it_1"]["it_1"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container.it_1.it_1), np.array([3]))
@pytest.mark.skip("Prevents PyTest from Terminating.")
def test_container_from_queues(on_device):
if "gpu" in on_device:
# Cannot re-initialize CUDA in forked subprocess. 'spawn'
# start method must be used.
pytest.skip()
if ivy.gpu_is_available() and ivy.current_backend_str() == "jax":
        # No way has been found to set a default on_device for JAX, and this
        # causes issues with multiprocessing and CUDA, even when device=cpu
        # ToDo: find a fix for this problem ^^
pytest.skip()
def worker_fn(in_queue, out_queue, load_size, worker_id):
keep_going = True
while keep_going:
try:
keep_going = in_queue.get(timeout=0.1)
except queue.Empty:
continue
out_queue.put(
{
"a": [
ivy.to_native(ivy.array([1.0, 2.0, 3.0], device=on_device))
* worker_id
]
* load_size
}
)
workers = []
in_queues = []
out_queues = []
queue_load_sizes = [1, 2, 1]
for i, queue_load_size in enumerate(queue_load_sizes):
input_queue = multiprocessing.Queue()
output_queue = multiprocessing.Queue()
worker = multiprocessing.Process(
target=worker_fn, args=(input_queue, output_queue, queue_load_size, i + 1)
)
worker.start()
in_queues.append(input_queue)
out_queues.append(output_queue)
workers.append(worker)
container = Container(
queues=out_queues, queue_load_sizes=queue_load_sizes, queue_timeout=0.25
)
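    # indexing the container lazily pulls from the output queues; before a
    # worker has produced anything, the timed get raises queue.Empty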
# queue 0
queue_was_empty = False
try:
container[0]
except queue.Empty:
queue_was_empty = True
assert queue_was_empty
in_queues[0].put(True)
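    # repeated access to the same index should be served from the already
    # retrieved items rather than blocking on the now-empty queue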
assert np.allclose(ivy.to_numpy(container[0].a), np.array([1.0, 2.0, 3.0]))
assert np.allclose(ivy.to_numpy(container[0].a), np.array([1.0, 2.0, 3.0]))
# queue 1
queue_was_empty = False
try:
container[1]
except queue.Empty:
queue_was_empty = True
assert queue_was_empty
queue_was_empty = False
try:
container[2]
except queue.Empty:
queue_was_empty = True
assert queue_was_empty
in_queues[1].put(True)
assert np.allclose(ivy.to_numpy(container[1].a), np.array([2.0, 4.0, 6.0]))
assert np.allclose(ivy.to_numpy(container[1].a), np.array([2.0, 4.0, 6.0]))
assert np.allclose(ivy.to_numpy(container[2].a), np.array([2.0, 4.0, 6.0]))
assert np.allclose(ivy.to_numpy(container[2].a), np.array([2.0, 4.0, 6.0]))
# queue 2
queue_was_empty = False
try:
container[3]
except queue.Empty:
queue_was_empty = True
assert queue_was_empty
in_queues[2].put(True)
assert np.allclose(ivy.to_numpy(container[3].a), np.array([3.0, 6.0, 9.0]))
assert np.allclose(ivy.to_numpy(container[3].a), np.array([3.0, 6.0, 9.0]))
# stop workers
in_queues[0].put(False)
in_queues[1].put(False)
in_queues[2].put(False)
in_queues[0].close()
in_queues[1].close()
in_queues[2].close()
# join workers
for worker in workers:
worker.join()
del container
def test_container_from_tuple(on_device):
tuple_in = (
ivy.array([1], device=on_device),
(ivy.array([2], device=on_device), ivy.array([3], device=on_device)),
)
container = Container(tuple_in, types_to_iteratively_nest=[tuple])
assert np.allclose(ivy.to_numpy(container["it_0"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container.it_0), np.array([1]))
assert np.allclose(ivy.to_numpy(container["it_1"]["it_0"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.it_1.it_0), np.array([2]))
assert np.allclose(ivy.to_numpy(container["it_1"]["it_1"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container.it_1.it_1), np.array([3]))
def test_container_has_key(on_device):
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
container = Container(dict_in)
assert container.cont_has_key("a") # noqa
assert container.cont_has_key("b") # noqa
assert container.cont_has_key("c") # noqa
assert container.cont_has_key("d") # noqa
assert not container.cont_has_key("e") # noqa
assert not container.cont_has_key("f") # noqa
def test_container_has_key_chain(on_device):
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
container = Container(dict_in)
assert container.cont_has_key_chain("a")
assert container.cont_has_key_chain("b")
assert container.cont_has_key_chain("b/c")
assert container.cont_has_key_chain("b/d")
assert not container.cont_has_key_chain("b/e")
assert not container.cont_has_key_chain("c")
def test_container_identical(on_device):
# without key_chains specification
arr1 = ivy.array([1], device=on_device)
arr2 = ivy.array([2], device=on_device)
arr3 = ivy.array([3], device=on_device)
container0 = Container({"a": arr1, "b": {"c": arr2, "d": arr3}})
container1 = Container({"a": arr1, "b": {"c": arr2, "d": arr3}})
container2 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container3 = Container({"b": {"d": arr3}})
container4 = Container({"d": arr3})
# the same
assert ivy.Container.cont_identical([container0, container1])
assert ivy.Container.cont_identical([container1, container0])
# not the same
assert not ivy.Container.cont_identical([container0, container2])
assert not ivy.Container.cont_identical([container2, container0])
assert not ivy.Container.cont_identical([container1, container2])
assert not ivy.Container.cont_identical([container2, container1])
# partial
assert ivy.Container.cont_identical([container0, container3], partial=True)
assert ivy.Container.cont_identical([container3, container0], partial=True)
assert not ivy.Container.cont_identical([container0, container4], partial=True)
assert not ivy.Container.cont_identical([container4, container0], partial=True)
def test_container_identical_array_shapes(on_device):
# without key_chains specification
container0 = Container(
{
"a": ivy.array([1, 2], device=on_device),
"b": {
"c": ivy.array([2, 3, 4], device=on_device),
"d": ivy.array([3, 4, 5, 6], device=on_device),
},
}
)
container1 = Container(
{
"a": ivy.array([1, 2, 3, 4], device=on_device),
"b": {
"c": ivy.array([3, 4], device=on_device),
"d": ivy.array([3, 4, 5], device=on_device),
},
}
)
container2 = Container(
{
"a": ivy.array([1, 2, 3, 4], device=on_device),
"b": {
"c": ivy.array([3, 4], device=on_device),
"d": ivy.array([3, 4, 5, 6], device=on_device),
},
}
)
# with identical
assert ivy.Container.cont_identical_array_shapes([container0, container1])
assert ivy.Container.cont_identical_array_shapes([container1, container0])
assert ivy.Container.cont_identical_array_shapes(
[container1, container0, container1]
)
assert not ivy.Container.cont_identical([container0, container2])
assert not ivy.Container.cont_identical([container1, container2])
assert not ivy.Container.cont_identical([container0, container1, container2])
def test_container_identical_configs(on_device):
container0 = Container({"a": ivy.array([1], device=on_device)}, print_limit=5)
container1 = Container({"a": ivy.array([1], device=on_device)}, print_limit=5)
container2 = Container({"a": ivy.array([1], device=on_device)}, print_limit=10)
# with identical
assert ivy.Container.cont_identical_configs([container0, container1])
assert ivy.Container.cont_identical_configs([container1, container0])
assert ivy.Container.cont_identical_configs([container1, container0, container1])
# without identical
assert not ivy.Container.cont_identical_configs([container1, container2])
assert not ivy.Container.cont_identical_configs(
[container1, container0, container2]
)
def test_container_identical_structure(on_device):
# without key_chains specification
container0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container1 = Container(
{
"a": ivy.array([3], device=on_device),
"b": {
"c": ivy.array([4], device=on_device),
"d": ivy.array([5], device=on_device),
},
}
)
container2 = Container(
{
"a": ivy.array([3], device=on_device),
"b": {
"c": ivy.array([4], device=on_device),
"d": ivy.array([5], device=on_device),
"e": ivy.array([6], device=on_device),
},
}
)
container3 = Container(
{
"a": ivy.array([3], device=on_device),
"b": {
"c": ivy.array([4], device=on_device),
"d": ivy.array([5], device=on_device),
},
"e": ivy.array([6], device=on_device),
}
)
container4 = Container({"b": {"d": ivy.array([4], device=on_device)}})
container5 = Container({"d": ivy.array([4], device=on_device)})
# with identical
assert ivy.Container.cont_identical_structure([container0, container1])
assert ivy.Container.cont_identical_structure([container1, container0])
assert ivy.Container.cont_identical_structure([container1, container0, container1])
# without identical
assert not ivy.Container.cont_identical_structure([container2, container3])
assert not ivy.Container.cont_identical_structure([container0, container3])
assert not ivy.Container.cont_identical_structure([container1, container2])
assert not ivy.Container.cont_identical_structure(
[container1, container0, container2]
)
# partial
assert ivy.Container.cont_identical_structure(
[container0, container4], partial=True
)
assert ivy.Container.cont_identical_structure(
[container1, container4], partial=True
)
assert ivy.Container.cont_identical_structure(
[container2, container4], partial=True
)
assert ivy.Container.cont_identical_structure(
[container3, container4], partial=True
)
assert ivy.Container.cont_identical_structure(
[container4, container4], partial=True
)
assert not ivy.Container.cont_identical_structure(
[container0, container5], partial=True
)
assert not ivy.Container.cont_identical_structure(
[container1, container5], partial=True
)
assert not ivy.Container.cont_identical_structure(
[container2, container5], partial=True
)
assert not ivy.Container.cont_identical_structure(
[container3, container5], partial=True
)
assert not ivy.Container.cont_identical_structure(
[container4, container5], partial=True
)
def test_container_if_exists(on_device):
dict_in = {
"a": ivy.array([[[1.0], [2.0], [3.0]]], device=on_device),
"b": {
"c": ivy.array([[[2.0], [4.0], [6.0]]], device=on_device),
"d": ivy.array([[[3.0], [6.0], [9.0]]], device=on_device),
},
}
container = Container(dict_in)
assert np.allclose(
ivy.to_numpy(container.cont_if_exists("a")), np.array([[[1.0], [2.0], [3.0]]])
)
assert "c" not in container
assert container.cont_if_exists("c") is None
container["c"] = ivy.array([[[1.0], [2.0], [3.0]]], device=on_device)
assert np.allclose(
ivy.to_numpy(container.cont_if_exists("c")), np.array([[[1.0], [2.0], [3.0]]])
)
assert container.cont_if_exists("d") is None
container.d = ivy.array([[[1.0], [2.0], [3.0]]], device=on_device)
assert np.allclose(
ivy.to_numpy(container.cont_if_exists("d")), np.array([[[1.0], [2.0], [3.0]]])
)
def test_container_inplace(on_device):
container0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([1], device=on_device),
"d": ivy.array([2], device=on_device),
},
}
)
const = 3
arr = ivy.array([1], device=on_device)
container1 = Container(
{
"a": ivy.array([3], device=on_device),
"b": {
"c": ivy.array([4], device=on_device),
"d": ivy.array([5], device=on_device),
},
}
)
special_funcs = [
"__add__",
"__and__",
"__floordiv__",
"__lshift__",
"__matmul__",
"__mod__",
"__mul__",
"__pow__",
"__rshift__",
"__sub__",
"__truediv__",
"__xor__",
]
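    # for each binary dunder, apply the in-place variant and check that the
    # values match the out-of-place result while the container's id and all of
    # its leaf ids are preserved (i.e. the update really happened in place)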
for func_str in special_funcs:
func = getattr(Container, func_str)
ifunc = getattr(Container, f"{func_str[:2]}i{func_str[2:]}")
for value in [
const,
arr,
container1,
]:
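            # matmul between a container and a plain int scalar is undefined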
            if value is const and func_str == "__matmul__":
continue
container0_copy = container0.cont_deep_copy()
id_before_op = id(container0_copy)
og_ids = container0_copy.cont_map(lambda x, _: id(x))
ifunc(container0_copy, value)
op_ids = container0_copy.cont_map(lambda x, _: id(x))
            assert ivy.Container.cont_all_true(
                func(container0, value) == container0_copy
            )  # values
            assert id(container0_copy) == id_before_op  # container ids
            assert ivy.Container.cont_all_true(og_ids == op_ids)  # value ids
@pytest.mark.parametrize("include_empty", [True, False])
def test_container_key_chains_containing(include_empty, on_device):
a_val = Container() if include_empty else ivy.array([1], device=on_device)
bc_val = Container() if include_empty else ivy.array([2], device=on_device)
bd_val = Container() if include_empty else ivy.array([3], device=on_device)
dict_in = {"a_sub": a_val, "b": {"c": bc_val, "d_sub": bd_val}}
container = Container(dict_in)
kcs = container.cont_key_chains_containing("sub", include_empty)
assert kcs[0] == "a_sub"
assert kcs[1] == "b/d_sub"
def test_container_list_join(on_device):
container_0 = Container(
{
"a": [ivy.array([1], device=on_device)],
"b": {
"c": [ivy.array([2], device=on_device)],
"d": [ivy.array([3], device=on_device)],
},
}
)
container_1 = Container(
{
"a": [ivy.array([4], device=on_device)],
"b": {
"c": [ivy.array([5], device=on_device)],
"d": [ivy.array([6], device=on_device)],
},
}
)
container_list_joined = ivy.Container.cont_list_join([container_0, container_1])
assert np.allclose(ivy.to_numpy(container_list_joined["a"][0]), np.array([1]))
assert np.allclose(ivy.to_numpy(container_list_joined.a[0]), np.array([1]))
assert np.allclose(ivy.to_numpy(container_list_joined["b"]["c"][0]), np.array([2]))
assert np.allclose(ivy.to_numpy(container_list_joined.b.c[0]), np.array([2]))
assert np.allclose(ivy.to_numpy(container_list_joined["b"]["d"][0]), np.array([3]))
assert np.allclose(ivy.to_numpy(container_list_joined.b.d[0]), np.array([3]))
assert np.allclose(ivy.to_numpy(container_list_joined["a"][1]), np.array([4]))
assert np.allclose(ivy.to_numpy(container_list_joined.a[1]), np.array([4]))
assert np.allclose(ivy.to_numpy(container_list_joined["b"]["c"][1]), np.array([5]))
assert np.allclose(ivy.to_numpy(container_list_joined.b.c[1]), np.array([5]))
assert np.allclose(ivy.to_numpy(container_list_joined["b"]["d"][1]), np.array([6]))
assert np.allclose(ivy.to_numpy(container_list_joined.b.d[1]), np.array([6]))
def test_container_list_stack(on_device):
container_0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container_1 = Container(
{
"a": ivy.array([4], device=on_device),
"b": {
"c": ivy.array([5], device=on_device),
"d": ivy.array([6], device=on_device),
},
}
)
container_list_stacked = ivy.Container.cont_list_stack(
[container_0, container_1], 0
)
assert np.allclose(ivy.to_numpy(container_list_stacked["a"][0]), np.array([1]))
assert np.allclose(ivy.to_numpy(container_list_stacked.a[0]), np.array([1]))
assert np.allclose(ivy.to_numpy(container_list_stacked["b"]["c"][0]), np.array([2]))
assert np.allclose(ivy.to_numpy(container_list_stacked.b.c[0]), np.array([2]))
assert np.allclose(ivy.to_numpy(container_list_stacked["b"]["d"][0]), np.array([3]))
assert np.allclose(ivy.to_numpy(container_list_stacked.b.d[0]), np.array([3]))
assert np.allclose(ivy.to_numpy(container_list_stacked["a"][1]), np.array([4]))
assert np.allclose(ivy.to_numpy(container_list_stacked.a[1]), np.array([4]))
assert np.allclose(ivy.to_numpy(container_list_stacked["b"]["c"][1]), np.array([5]))
assert np.allclose(ivy.to_numpy(container_list_stacked.b.c[1]), np.array([5]))
assert np.allclose(ivy.to_numpy(container_list_stacked["b"]["d"][1]), np.array([6]))
assert np.allclose(ivy.to_numpy(container_list_stacked.b.d[1]), np.array([6]))
@pytest.mark.parametrize("inplace", [True, False])
def test_container_map(inplace, on_device):
# without key_chains specification
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
container_orig = Container(dict_in)
container = container_orig.cont_deep_copy()
container_mapped = container.cont_map(lambda x, _: x + 1, inplace=inplace)
if inplace:
container_iterator = container.cont_to_iterator()
else:
container_iterator = container_mapped.cont_to_iterator()
for (key, value), expected_value in zip(
container_iterator,
[
ivy.array([2], device=on_device),
ivy.array([3], device=on_device),
ivy.array([4], device=on_device),
],
):
        assert np.allclose(ivy.to_numpy(value), ivy.to_numpy(expected_value))
# with key_chains to apply
container = container_orig.cont_deep_copy()
container_mapped = container.cont_map(
lambda x, _: x + 1, ["a", "b/c"], inplace=inplace
)
if inplace:
container_mapped = container
assert np.allclose(ivy.to_numpy(container_mapped["a"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_mapped.a), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_mapped["b"]["c"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.c), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped["b"]["d"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.d), np.array([[3]]))
# with key_chains to apply pruned
container = container_orig.cont_deep_copy()
container_mapped = container.cont_map(
lambda x, _: x + 1, ["a", "b/c"], prune_unapplied=True, inplace=inplace
)
if inplace:
container_mapped = container
assert np.allclose(ivy.to_numpy(container_mapped["a"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_mapped.a), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_mapped["b"]["c"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.c), np.array([[3]]))
if not inplace:
assert "b/d" not in container_mapped
# with key_chains to not apply
container = container_orig.cont_deep_copy()
container_mapped = container.cont_map(
lambda x, _: x + 1,
Container({"a": None, "b": {"d": None}}),
to_apply=False,
inplace=inplace,
)
if inplace:
container_mapped = container
assert np.allclose(ivy.to_numpy(container_mapped["a"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_mapped.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_mapped["b"]["c"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.c), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped["b"]["d"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.d), np.array([[3]]))
# with key_chains to not apply pruned
container = container_orig.cont_deep_copy()
container_mapped = container.cont_map(
lambda x, _: x + 1,
Container({"a": None, "b": {"d": None}}),
to_apply=False,
prune_unapplied=True,
inplace=inplace,
)
if inplace:
container_mapped = container
if not inplace:
assert "a" not in container_mapped
assert np.allclose(ivy.to_numpy(container_mapped["b"]["c"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.c), np.array([[3]]))
if not inplace:
assert "b/d" not in container_mapped
# with sequences
container_orig = Container(
{
"a": ivy.array([1], device=on_device),
"b": [ivy.array([2], device=on_device), ivy.array([3], device=on_device)],
}
)
container = container_orig.cont_deep_copy()
container_mapped = container.cont_map(
lambda x, _: x + 1, inplace=inplace, map_sequences=True
)
if inplace:
container_mapped = container
assert np.allclose(ivy.to_numpy(container_mapped["a"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container_mapped["b"][0]), np.array([3]))
assert np.allclose(ivy.to_numpy(container_mapped["b"][1]), np.array([4]))
@pytest.mark.parametrize("inplace", [True, False])
def test_container_map_sub_conts(inplace, on_device):
# without key_chains specification
container_orig = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
def _add_e_attr(cont_in):
cont_in.e = ivy.array([4], device=on_device)
return cont_in
# with self
container = container_orig.cont_deep_copy()
container_mapped = container.cont_map_sub_conts(
lambda c, _: _add_e_attr(c), inplace=inplace
)
if inplace:
container_mapped = container
assert "e" in container_mapped
assert np.array_equal(ivy.to_numpy(container_mapped.e), np.array([4]))
assert "e" in container_mapped.b
assert np.array_equal(ivy.to_numpy(container_mapped.b.e), np.array([4]))
# without self
container = container_orig.cont_deep_copy()
container_mapped = container.cont_map_sub_conts(
lambda c, _: _add_e_attr(c), include_self=False, inplace=inplace
)
if inplace:
container_mapped = container
assert "e" not in container_mapped
assert "e" in container_mapped.b
assert np.array_equal(ivy.to_numpy(container_mapped.b.e), np.array([4]))
def test_container_multi_map(on_device):
# without key_chains specification
container0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container1 = Container(
{
"a": ivy.array([3], device=on_device),
"b": {
"c": ivy.array([4], device=on_device),
"d": ivy.array([5], device=on_device),
},
}
)
# with key_chains to apply
container_mapped = ivy.Container.cont_multi_map(
lambda x, _: x[0] + x[1], [container0, container1], assert_identical=True
)
assert np.allclose(ivy.to_numpy(container_mapped["a"]), np.array([[4]]))
assert np.allclose(ivy.to_numpy(container_mapped.a), np.array([[4]]))
assert np.allclose(ivy.to_numpy(container_mapped["b"]["c"]), np.array([[6]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.c), np.array([[6]]))
assert np.allclose(ivy.to_numpy(container_mapped["b"]["d"]), np.array([[8]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.d), np.array([[8]]))
# with sequences
container0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": [
ivy.array([2], device=on_device),
ivy.array([3], device=on_device),
],
}
)
container1 = Container(
{
"a": ivy.array([3], device=on_device),
"b": [
ivy.array([4], device=on_device),
ivy.array([5], device=on_device),
],
}
)
container_mapped = ivy.Container.cont_multi_map(
lambda x, _: x[0] + x[1],
[container0, container1],
map_nests=True,
assert_identical=True,
)
assert np.allclose(ivy.to_numpy(container_mapped["a"]), np.array([4]))
assert np.allclose(ivy.to_numpy(container_mapped["b"][0]), np.array([6]))
assert np.allclose(ivy.to_numpy(container_mapped["b"][1]), np.array([8]))
# Non identical containers
a = ivy.Container(a={"b": 2, "c": 4}, d={"e": 6, "f": 9})
b = ivy.Container(a=2, d=3)
container_mapped = ivy.Container.cont_multi_map(lambda xs, _: xs[0] / xs[1], [a, b])
assert np.allclose(ivy.to_numpy(container_mapped["a"].b), 1)
assert np.allclose(ivy.to_numpy(container_mapped["a"]["c"]), 2)
assert np.allclose(ivy.to_numpy(container_mapped.d.e), 2)
assert np.allclose(ivy.to_numpy(container_mapped["d"].f), 3)
def test_container_num_arrays(on_device):
dict_in = {
"a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=on_device),
"b": {
"c": ivy.array([[5.0, 10.0, 15.0, 20.0]], device=on_device),
"d": ivy.array([[10.0, 9.0, 8.0, 7.0]], device=on_device),
},
}
container = Container(dict_in)
assert container.cont_num_arrays() == 3
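    # _variable leaves only count as arrays on backends where variables are
    # plain arrays (numpy, jax); on other backends they are distinct types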
dict_in = {
"a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=on_device),
"b": {
"c": _variable(ivy.array([[5.0, 10.0, 15.0, 20.0]], device=on_device)),
"d": ivy.array([[10.0, 9.0, 8.0, 7.0]], device=on_device),
},
}
container = Container(dict_in)
    expected_num_arrays = 3 if ivy.current_backend_str() in ("numpy", "jax") else 2
    assert container.cont_num_arrays() == expected_num_arrays
# noinspection PyUnresolvedReferences
def test_container_overwrite_at_key_chain(on_device):
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
container_orig = Container(dict_in)
# explicit function call
container = container_orig.cont_copy()
# noinspection PyBroadException
try:
container.cont_overwrite_at_key_chain("b/e", ivy.array([4], device=on_device))
exception_raised = False
except Exception:
exception_raised = True
assert exception_raised
container = container.cont_overwrite_at_key_chain(
"b/d", ivy.array([4], device=on_device)
)
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([4]))
def test_container_overwrite_at_key_chains(on_device):
container = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
target_container = Container(
{
"a": ivy.array([4], device=on_device),
"b": {"d": ivy.array([5], device=on_device)},
}
)
new_container = container.cont_overwrite_at_key_chains(
target_container, inplace=False
)
assert np.allclose(ivy.to_numpy(new_container["a"]), np.array([4]))
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(new_container["b"]["d"]), np.array([5]))
target_container = Container({"b": {"c": ivy.array([7], device=on_device)}})
new_container = container.cont_overwrite_at_key_chains(
target_container, inplace=False
)
assert np.allclose(ivy.to_numpy(new_container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([7]))
assert np.allclose(ivy.to_numpy(new_container["b"]["d"]), np.array([3]))
# noinspection PyBroadException
try:
container.cont_overwrite_at_key_chains(
Container({"b": {"e": ivy.array([5], device=on_device)}})
)
exception_raised = False
except Exception:
exception_raised = True
assert exception_raised
def test_container_pickle(on_device):
dict_in = {
"a": ivy.array([np.float32(1.0)], device=on_device),
"b": {
"c": ivy.array([np.float32(2.0)], device=on_device),
"d": ivy.array([np.float32(3.0)], device=on_device),
},
}
# without module attribute
cont = Container(dict_in)
    # paddle tensors can't be pickled directly, as mentioned
# in the issue https://github.com/PaddlePaddle/Paddle/issues/41107
if ivy.backend == "paddle":
cont = cont.to_numpy()
assert cont._local_ivy is None
pickled = pickle.dumps(cont)
cont_again = pickle.loads(pickled)
assert cont_again._local_ivy is None
    assert ivy.Container.cont_identical_structure([cont, cont_again])
    assert ivy.Container.cont_identical_configs([cont, cont_again])
# with module attribute
cont = Container(dict_in, ivyh=ivy)
    # paddle tensors can't be pickled directly, as mentioned
# in the issue https://github.com/PaddlePaddle/Paddle/issues/41107
if ivy.backend == "paddle":
cont = cont.to_numpy()
assert cont._local_ivy is ivy
pickled = pickle.dumps(cont)
cont_again = pickle.loads(pickled)
# noinspection PyUnresolvedReferences
    assert cont_again._local_ivy.current_backend_str() == ivy.current_backend_str()
    assert ivy.Container.cont_identical_structure([cont, cont_again])
    assert ivy.Container.cont_identical_configs([cont, cont_again])
def test_container_prune_empty(on_device):
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {"c": {}, "d": ivy.array([3], device=on_device)},
}
container = Container(dict_in)
container_pruned = container.cont_prune_empty()
assert np.allclose(ivy.to_numpy(container_pruned["a"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned["b"]["d"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.b.d), np.array([[3]]))
assert "c" not in container_pruned["b"]
def _test_exception(container_in):
try:
_ = container_in.b.c
return False
except AttributeError:
return True
assert _test_exception(container_pruned)
def test_container_prune_key_chain(on_device):
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {"c": ivy.array([2], device=on_device), "d": None},
}
container = Container(dict_in)
container_pruned = container.cont_prune_key_chain("b/c")
assert np.allclose(ivy.to_numpy(container_pruned["a"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.a), np.array([[1]]))
assert container_pruned["b"]["d"] is None
assert container_pruned.b.d is None
assert "c" not in container_pruned["b"].keys()
def _test_exception(container_in):
try:
_ = container_in.b.c
return False
except AttributeError:
return True
assert _test_exception(container_pruned)
container_pruned = container.cont_prune_key_chain("b")
assert np.allclose(ivy.to_numpy(container_pruned["a"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.a), np.array([[1]]))
assert "b" not in container_pruned.keys()
def _test_exception(container_in):
try:
_ = container_in.b
return False
except AttributeError:
return True
assert _test_exception(container_pruned)
def test_container_prune_key_chains(on_device):
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
container = Container(dict_in)
container_pruned = container.cont_prune_key_chains(["a", "b/c"])
assert "a" not in container_pruned
assert np.allclose(ivy.to_numpy(container_pruned["b"]["d"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.b.d), np.array([[3]]))
assert "c" not in container_pruned["b"]
def _test_a_exception(container_in):
try:
_ = container_in.a
return False
except AttributeError:
return True
def _test_bc_exception(container_in):
try:
_ = container_in.b.c
return False
except AttributeError:
return True
assert _test_a_exception(container_pruned)
assert _test_bc_exception(container_pruned)
container_pruned = container.cont_prune_key_chains(
Container({"a": True, "b": {"c": True}})
)
assert "a" not in container_pruned
assert np.allclose(ivy.to_numpy(container_pruned["b"]["d"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.b.d), np.array([[3]]))
assert "c" not in container_pruned["b"]
assert _test_a_exception(container_pruned)
assert _test_bc_exception(container_pruned)
def test_container_prune_key_from_key_chains(on_device):
container = Container(
{
"Ayy": ivy.array([1], device=on_device),
"Bee": {
"Cee": ivy.array([2], device=on_device),
"Dee": ivy.array([3], device=on_device),
},
"Beh": {
"Ceh": ivy.array([4], device=on_device),
"Deh": ivy.array([5], device=on_device),
},
}
)
# absolute
container_pruned = container.cont_prune_key_from_key_chains("Bee")
assert np.allclose(ivy.to_numpy(container_pruned["Ayy"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.Ayy), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned["Cee"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned.Cee), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned["Dee"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.Dee), np.array([[3]]))
assert "Bee" not in container_pruned
# containing
container_pruned = container.cont_prune_key_from_key_chains(containing="B")
assert np.allclose(ivy.to_numpy(container_pruned["Ayy"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.Ayy), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned["Cee"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned.Cee), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned["Dee"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.Dee), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned["Ceh"]), np.array([[4]]))
assert np.allclose(ivy.to_numpy(container_pruned.Ceh), np.array([[4]]))
assert np.allclose(ivy.to_numpy(container_pruned["Deh"]), np.array([[5]]))
assert np.allclose(ivy.to_numpy(container_pruned.Deh), np.array([[5]]))
assert "Bee" not in container_pruned
assert "Beh" not in container_pruned
def test_container_prune_keys(on_device):
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
container = Container(dict_in)
container_pruned = container.cont_prune_keys(["a", "c"])
assert "a" not in container_pruned
assert np.allclose(ivy.to_numpy(container_pruned["b"]["d"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.b.d), np.array([[3]]))
assert "c" not in container_pruned["b"]
def _test_a_exception(container_in):
try:
_ = container_in.a
return False
except AttributeError:
return True
def _test_bc_exception(container_in):
try:
_ = container_in.b.c
return False
except AttributeError:
return True
def _test_bd_exception(container_in):
try:
_ = container_in.b.d
return False
except AttributeError:
return True
assert _test_a_exception(container_pruned)
assert _test_bc_exception(container_pruned)
container_pruned = container.cont_prune_keys(["a", "d"])
assert "a" not in container_pruned
assert np.allclose(ivy.to_numpy(container_pruned["b"]["c"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned.b.c), np.array([[2]]))
assert "d" not in container_pruned["b"]
assert _test_a_exception(container_pruned)
assert _test_bd_exception(container_pruned)
def test_container_prune_keys_from_key_chains(on_device):
container = Container(
{
"Ayy": ivy.array([1], device=on_device),
"Bee": {
"Cee": ivy.array([2], device=on_device),
"Dee": ivy.array([3], device=on_device),
},
"Eee": {"Fff": ivy.array([4], device=on_device)},
}
)
# absolute
container_pruned = container.cont_prune_keys_from_key_chains(["Bee", "Eee"])
assert np.allclose(ivy.to_numpy(container_pruned["Ayy"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.Ayy), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned["Cee"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned.Cee), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned["Dee"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.Dee), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned["Fff"]), np.array([[4]]))
assert np.allclose(ivy.to_numpy(container_pruned.Fff), np.array([[4]]))
assert "Bee" not in container_pruned
assert "Eee" not in container_pruned
# containing
container_pruned = container.cont_prune_keys_from_key_chains(containing=["B", "E"])
assert np.allclose(ivy.to_numpy(container_pruned["Ayy"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.Ayy), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned["Cee"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned.Cee), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned["Dee"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.Dee), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned["Fff"]), np.array([[4]]))
assert np.allclose(ivy.to_numpy(container_pruned.Fff), np.array([[4]]))
assert "Bee" not in container_pruned
assert "Eee" not in container_pruned
def test_container_reduce(on_device):
container_a = ivy.Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container_b = ivy.Container(
{
"a": ivy.array([2], device=on_device),
"b": {
"c": ivy.array([4], device=on_device),
"d": ivy.array([6], device=on_device),
},
}
)
res = ivy.Container.cont_reduce([container_a, container_b], lambda x: x[0] + x[1])
assert np.allclose(ivy.to_numpy(res.a), np.array([3.0]))
assert np.allclose(ivy.to_numpy(res.b.c), np.array([6]))
assert np.allclose(ivy.to_numpy(res.b.d), np.array([9]))
def test_container_remove_key_length_limit(on_device):
cont = Container(
{
"a": ivy.array([0.0], device=on_device),
"b": {
"c": ivy.array([1.0], device=on_device),
"d": ivy.array([2.0], device=on_device),
},
}
)
cont.cont_with_key_length_limit(5, inplace=True)
default_key_length_limit = cont._key_length_limit
id_cont = id(cont)
cont1 = cont.cont_remove_key_length_limit()
assert cont1._key_length_limit is None
assert id(cont1) != id(cont)
assert cont._key_length_limit == default_key_length_limit
assert cont.b._key_length_limit == default_key_length_limit
assert cont._key_length_limit != cont1._key_length_limit
cont.cont_remove_key_length_limit(inplace=True)
assert cont._key_length_limit is None
assert cont.b._key_length_limit is None
assert id(cont) == id_cont
def test_container_remove_print_limit(on_device):
cont = Container(
{
"a": ivy.array([0.0], device=on_device),
"b": {
"c": ivy.array([1.0], device=on_device),
"d": ivy.array([2.0], device=on_device),
},
}
)
default_print_limit = cont._print_limit
id_cont = id(cont)
cont1 = cont.cont_remove_print_limit()
assert cont1._print_limit is None
assert id(cont1) != id(cont)
assert cont._print_limit == default_print_limit
assert cont._print_limit != cont1._print_limit
assert cont.b._print_limit == default_print_limit
cont.cont_remove_print_limit(inplace=True)
assert cont._print_limit is None
assert cont.b._print_limit is None
assert id(cont) == id_cont
def test_container_reshape_like(on_device):
container = Container(
{
"a": ivy.array([[1.0]], device=on_device),
"b": {
"c": ivy.array([[3.0], [4.0]], device=on_device),
"d": ivy.array([[5.0], [6.0], [7.0]], device=on_device),
},
}
)
new_shapes = Container({"a": (1,), "b": {"c": (1, 2, 1), "d": (3, 1, 1)}})
# without leading shape
container_reshaped = container.cont_reshape_like(new_shapes)
assert list(container_reshaped["a"].shape) == [1]
assert list(container_reshaped.a.shape) == [1]
assert list(container_reshaped["b"]["c"].shape) == [1, 2, 1]
assert list(container_reshaped.b.c.shape) == [1, 2, 1]
assert list(container_reshaped["b"]["d"].shape) == [3, 1, 1]
assert list(container_reshaped.b.d.shape) == [3, 1, 1]
# with leading shape
container = Container(
{
"a": ivy.array([[[1.0]], [[1.0]], [[1.0]]], device=on_device),
"b": {
"c": ivy.array(
[[[3.0], [4.0]], [[3.0], [4.0]], [[3.0], [4.0]]], device=on_device
),
"d": ivy.array(
[
[[5.0], [6.0], [7.0]],
[[5.0], [6.0], [7.0]],
[[5.0], [6.0], [7.0]],
],
device=on_device,
),
},
}
)
container_reshaped = container.cont_reshape_like(new_shapes, leading_shape=[3])
assert list(container_reshaped["a"].shape) == [3, 1]
assert list(container_reshaped.a.shape) == [3, 1]
assert list(container_reshaped["b"]["c"].shape) == [3, 1, 2, 1]
assert list(container_reshaped.b.c.shape) == [3, 1, 2, 1]
assert list(container_reshaped["b"]["d"].shape) == [3, 3, 1, 1]
assert list(container_reshaped.b.d.shape) == [3, 3, 1, 1]
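# cont_restructure renames key chains and reshapes leaves using
# einops-style patterns, e.g. "a b -> b a" (transpose) and
# "(a b) -> a b" (unflatten with explicit axes_lengths)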
def test_container_restructure(on_device):
container = Container(
{
"a": ivy.array([[1, 2], [3, 4]], device=on_device),
"b": {
"c": ivy.array([[2, 4], [6, 8]], device=on_device),
"d": ivy.array([3, 6, 9, 12], device=on_device),
},
}
)
container_restructured = container.cont_restructure(
{
"a": {"key_chain": "A", "pattern": "a b -> b a"},
"b/c": {"key_chain": "B/C", "pattern": "a b -> (a b)"},
"b/d": {
"key_chain": "B/D",
"pattern": "(a b) -> a b",
"axes_lengths": {"a": 2, "b": 2},
},
},
keep_orig=False,
)
assert np.allclose(
ivy.to_numpy(container_restructured["A"]), np.array([[1, 3], [2, 4]])
)
assert np.allclose(
ivy.to_numpy(container_restructured.A), np.array([[1, 3], [2, 4]])
)
assert np.allclose(
ivy.to_numpy(container_restructured["B/C"]), np.array([2, 4, 6, 8])
)
assert np.allclose(ivy.to_numpy(container_restructured.B.C), np.array([2, 4, 6, 8]))
assert np.allclose(
ivy.to_numpy(container_restructured["B/D"]), np.array([[3, 6], [9, 12]])
)
assert np.allclose(
ivy.to_numpy(container_restructured.B.D), np.array([[3, 6], [9, 12]])
)
def test_container_restructure_key_chains(on_device):
# single
container = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container_restructured = container.cont_restructure_key_chains({"a": "A"})
assert np.allclose(ivy.to_numpy(container_restructured["A"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_restructured.A), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_restructured["b/c"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_restructured.b.c), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_restructured["b/d"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_restructured.b.d), np.array([[3]]))
# full
container = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container_restructured = container.cont_restructure_key_chains(
{"a": "A", "b/c": "B/C", "b/d": "B/D"}
)
assert np.allclose(ivy.to_numpy(container_restructured["A"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_restructured.A), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_restructured["B/C"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_restructured.B.C), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_restructured["B/D"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_restructured.B.D), np.array([[3]]))
# noinspection PyUnresolvedReferences
def test_container_set_at_key_chain(on_device):
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
container_orig = Container(dict_in)
# explicit function call
container = container_orig.cont_copy()
    container = container.cont_set_at_key_chain(
        "b/e", ivy.array([4], device=on_device)
    )
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container["b"]["e"]), np.array([4]))
container = container.cont_set_at_key_chain("f", ivy.array([5], device=on_device))
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container["b"]["e"]), np.array([4]))
assert np.allclose(ivy.to_numpy(container["f"]), np.array([5]))
# overridden built-in function call
container = container_orig.cont_copy()
assert "b/e" not in container
container["b/e"] = ivy.array([4], device=on_device)
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container["b"]["e"]), np.array([4]))
assert "f" not in container
container["f"] = ivy.array([5], device=on_device)
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container["b"]["e"]), np.array([4]))
assert np.allclose(ivy.to_numpy(container["f"]), np.array([5]))
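# cont_set_at_key_chains writes several leaves in one call, taking the new
# values from a target container with matching key chains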
def test_container_set_at_key_chains(on_device):
container = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
target_container = Container(
{
"a": ivy.array([4], device=on_device),
"b": {"d": ivy.array([5], device=on_device)},
}
)
new_container = container.cont_set_at_key_chains(target_container, inplace=False)
assert np.allclose(ivy.to_numpy(new_container["a"]), np.array([4]))
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(new_container["b"]["d"]), np.array([5]))
target_container = Container({"b": {"c": ivy.array([7], device=on_device)}})
new_container = container.cont_set_at_key_chains(target_container, inplace=False)
assert np.allclose(ivy.to_numpy(new_container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([7]))
assert np.allclose(ivy.to_numpy(new_container["b"]["d"]), np.array([3]))
# noinspection PyUnresolvedReferences
def test_container_set_at_keys(on_device):
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
container_orig = Container(dict_in)
# explicit function call
orig_container = container_orig.cont_copy()
    container = orig_container.cont_set_at_keys(
        {"b": ivy.array([4], device=on_device)}
    )
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]), np.array([4]))
assert not container.cont_has_key("c") # noqa
assert not container.cont_has_key("d") # noqa
container = orig_container.cont_set_at_keys(
{"a": ivy.array([5], device=on_device), "c": ivy.array([6], device=on_device)}
)
assert np.allclose(ivy.to_numpy(container["a"]), np.array([5]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([6]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([3]))
def test_container_shapes(on_device):
dict_in = {
"a": ivy.array([[[1.0], [2.0], [3.0]]], device=on_device),
"b": {
"c": ivy.array([[[2.0], [4.0]]], device=on_device),
"d": ivy.array([[9.0]], device=on_device),
},
}
container_shapes = Container(dict_in).cont_shapes
assert list(container_shapes["a"]) == [1, 3, 1]
assert list(container_shapes.a) == [1, 3, 1]
assert list(container_shapes["b"]["c"]) == [1, 2, 1]
assert list(container_shapes.b.c) == [1, 2, 1]
assert list(container_shapes["b"]["d"]) == [1, 1]
assert list(container_shapes.b.d) == [1, 1]
def test_container_show(on_device):
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
cont = Container(dict_in)
print(cont)
cont.cont_show()
def test_container_show_sub_container(on_device):
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
top_cont = Container(dict_in)
sub_cont = Container(dict_in["b"])
top_cont.cont_show_sub_container("b")
top_cont.cont_show_sub_container(sub_cont)
def test_container_size_ordered_arrays(on_device):
dict_in = {
"a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=on_device),
"b": {
"c": ivy.array([[5.0, 10.0]], device=on_device),
"d": ivy.array([[10.0, 9.0, 8.0]], device=on_device),
},
}
container = Container(dict_in)
size_ordered = container.cont_size_ordered_arrays()
assert np.allclose(ivy.to_numpy(size_ordered.a), np.array([[0.0, 1.0, 2.0, 3.0]]))
assert np.allclose(ivy.to_numpy(size_ordered.b__c), np.array([[5.0, 10.0]]))
assert np.allclose(ivy.to_numpy(size_ordered.b__d), np.array([[10.0, 9.0, 8.0]]))
for v, arr in zip(
size_ordered.values(),
[
np.array([[5.0, 10.0]]),
np.array([[10.0, 9.0, 8.0]]),
np.array([[0.0, 1.0, 2.0, 3.0]]),
],
):
assert np.allclose(ivy.to_numpy(v), arr)
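# integer indexing into a container is expected to slice every leaf array
# along its first axis, preserving the nested structure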
def test_container_slice(on_device):
dict_in = {
"a": ivy.array([[0.0], [1.0]], device=on_device),
"b": {
"c": ivy.array([[1.0], [2.0]], device=on_device),
"d": ivy.array([[2.0], [3.0]], device=on_device),
},
}
container = Container(dict_in)
container0 = container[0]
container1 = container[1]
assert np.array_equal(ivy.to_numpy(container0["a"]), np.array([0.0]))
assert np.array_equal(ivy.to_numpy(container0.a), np.array([0.0]))
assert np.array_equal(ivy.to_numpy(container0["b"]["c"]), np.array([1.0]))
assert np.array_equal(ivy.to_numpy(container0.b.c), np.array([1.0]))
assert np.array_equal(ivy.to_numpy(container0["b"]["d"]), np.array([2.0]))
assert np.array_equal(ivy.to_numpy(container0.b.d), np.array([2.0]))
assert np.array_equal(ivy.to_numpy(container1["a"]), np.array([1.0]))
assert np.array_equal(ivy.to_numpy(container1.a), np.array([1.0]))
assert np.array_equal(ivy.to_numpy(container1["b"]["c"]), np.array([2.0]))
assert np.array_equal(ivy.to_numpy(container1.b.c), np.array([2.0]))
assert np.array_equal(ivy.to_numpy(container1["b"]["d"]), np.array([3.0]))
assert np.array_equal(ivy.to_numpy(container1.b.d), np.array([3.0]))
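# cont_slice_keys accepts either a string slice such as "b:d" or a Python
# slice object, and can be restricted to specific depths via a dict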
@pytest.mark.parametrize("str_slice", [True, False])
def test_container_slice_keys(str_slice, on_device):
# values
a_val = ivy.array([1], device=on_device)
b_val = ivy.array([2], device=on_device)
c_val = ivy.array([3], device=on_device)
d_val = ivy.array([4], device=on_device)
e_val = ivy.array([5], device=on_device)
# slice
if str_slice:
slc = "b:d"
else:
slc = slice(1, 4, 1)
# without dict
cont = Container({"a": a_val, "b": b_val, "c": c_val, "d": d_val, "e": e_val})
cont_sliced = cont.cont_slice_keys(slc)
assert "a" not in cont_sliced
assert np.allclose(ivy.to_numpy(cont_sliced.b), ivy.to_numpy(b_val))
assert np.allclose(ivy.to_numpy(cont_sliced.c), ivy.to_numpy(c_val))
assert np.allclose(ivy.to_numpy(cont_sliced.d), ivy.to_numpy(d_val))
assert "e" not in cont_sliced
# with dict, depth 0
sub_cont = Container({"a": a_val, "b": b_val, "c": c_val, "d": d_val, "e": e_val})
cont = Container(
{"a": sub_cont, "b": sub_cont, "c": sub_cont, "d": sub_cont, "e": sub_cont}
)
cont_sliced = cont.cont_slice_keys({0: slc})
assert "a" not in cont_sliced
assert Container.cont_identical([cont_sliced.b, sub_cont])
assert Container.cont_identical([cont_sliced.c, sub_cont])
assert Container.cont_identical([cont_sliced.d, sub_cont])
assert "e" not in cont_sliced
# with dict, depth 1
sub_cont = Container({"a": a_val, "b": b_val, "c": c_val, "d": d_val, "e": e_val})
sub_sub_cont = Container({"b": b_val, "c": c_val, "d": d_val})
cont = Container(
{"a": sub_cont, "b": sub_cont, "c": sub_cont, "d": sub_cont, "e": sub_cont}
)
cont_sliced = cont.cont_slice_keys({1: slc})
assert Container.cont_identical([cont_sliced.a, sub_sub_cont])
assert Container.cont_identical([cont_sliced.b, sub_sub_cont])
assert Container.cont_identical([cont_sliced.c, sub_sub_cont])
assert Container.cont_identical([cont_sliced.d, sub_sub_cont])
assert Container.cont_identical([cont_sliced.e, sub_sub_cont])
# with dict, depth 0, 1
sub_cont = Container({"a": a_val, "b": b_val, "c": c_val, "d": d_val, "e": e_val})
sub_sub_cont = Container({"b": b_val, "c": c_val, "d": d_val})
cont = Container(
{"a": sub_cont, "b": sub_cont, "c": sub_cont, "d": sub_cont, "e": sub_cont}
)
cont_sliced = cont.cont_slice_keys({0: slc, 1: slc})
assert "a" not in cont_sliced
assert Container.cont_identical([cont_sliced.b, sub_sub_cont])
assert Container.cont_identical([cont_sliced.c, sub_sub_cont])
assert Container.cont_identical([cont_sliced.d, sub_sub_cont])
assert "e" not in cont_sliced
# all depths
sub_cont = Container({"a": a_val, "b": b_val, "c": c_val, "d": d_val, "e": e_val})
sub_sub_cont = Container({"b": b_val, "c": c_val, "d": d_val})
cont = Container(
{"a": sub_cont, "b": sub_cont, "c": sub_cont, "d": sub_cont, "e": sub_cont}
)
cont_sliced = cont.cont_slice_keys(slc, all_depths=True)
assert "a" not in cont_sliced
assert Container.cont_identical([cont_sliced.b, sub_sub_cont])
assert Container.cont_identical([cont_sliced.c, sub_sub_cont])
assert Container.cont_identical([cont_sliced.d, sub_sub_cont])
assert "e" not in cont_sliced
def test_container_slice_via_key(on_device):
dict_in = {
"a": {
"x": ivy.array([0.0], device=on_device),
"y": ivy.array([1.0], device=on_device),
},
"b": {
"c": {
"x": ivy.array([1.0], device=on_device),
"y": ivy.array([2.0], device=on_device),
},
"d": {
"x": ivy.array([2.0], device=on_device),
"y": ivy.array([3.0], device=on_device),
},
},
}
container = Container(dict_in)
containerx = container.cont_slice_via_key("x")
containery = container.cont_slice_via_key("y")
assert np.array_equal(ivy.to_numpy(containerx["a"]), np.array([0.0]))
assert np.array_equal(ivy.to_numpy(containerx.a), np.array([0.0]))
assert np.array_equal(ivy.to_numpy(containerx["b"]["c"]), np.array([1.0]))
assert np.array_equal(ivy.to_numpy(containerx.b.c), np.array([1.0]))
assert np.array_equal(ivy.to_numpy(containerx["b"]["d"]), np.array([2.0]))
assert np.array_equal(ivy.to_numpy(containerx.b.d), np.array([2.0]))
assert np.array_equal(ivy.to_numpy(containery["a"]), np.array([1.0]))
assert np.array_equal(ivy.to_numpy(containery.a), np.array([1.0]))
assert np.array_equal(ivy.to_numpy(containery["b"]["c"]), np.array([2.0]))
assert np.array_equal(ivy.to_numpy(containery.b.c), np.array([2.0]))
assert np.array_equal(ivy.to_numpy(containery["b"]["d"]), np.array([3.0]))
assert np.array_equal(ivy.to_numpy(containery.b.d), np.array([3.0]))
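# cont_sort_by_key is expected to order keys alphabetically at every
# nesting depth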
def test_container_sort_by_key(on_device):
dict_in = {
"b": ivy.array([1], device=on_device),
"a": {
"d": ivy.array([2], device=on_device),
"c": ivy.array([3], device=on_device),
},
}
container = Container(dict_in)
container_sorted = container.cont_sort_by_key()
for k, k_true in zip(container_sorted.keys(), ["a", "b"]):
assert k == k_true
for k, k_true in zip(container_sorted.a.keys(), ["c", "d"]):
assert k == k_true
def test_container_split_conts(on_device):
dict_in = {
"a": ivy.array([[1], [2], [3]], device=on_device),
"b": {
"c": ivy.array([[2], [3], [4]], device=on_device),
"d": ivy.array([[3], [4], [5]], device=on_device),
},
}
container = Container(dict_in)
# without key_chains specification
container_split = container.split_conts(1, -1)
for cont, a, bc, bd in zip(container_split, [1, 2, 3], [2, 3, 4], [3, 4, 5]):
assert np.array_equal(ivy.to_numpy(cont["a"])[0], np.array([a]))
assert np.array_equal(ivy.to_numpy(cont.a)[0], np.array([a]))
assert np.array_equal(ivy.to_numpy(cont["b"]["c"])[0], np.array([bc]))
assert np.array_equal(ivy.to_numpy(cont.b.c)[0], np.array([bc]))
assert np.array_equal(ivy.to_numpy(cont["b"]["d"])[0], np.array([bd]))
assert np.array_equal(ivy.to_numpy(cont.b.d)[0], np.array([bd]))
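# cont_structural_diff compares structure (keys and shapes) rather than
# values; differing leaves are split into diff_0/diff_1 sub-entries and
# mode selects whether only differing or only matching entries are kept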
def test_container_structural_diff(on_device):
# all different keys or shapes
container_0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container_1 = Container(
{
"a": ivy.array([[4]], device=on_device),
"b": {
"c": ivy.array([[[5]]], device=on_device),
"e": ivy.array([3], device=on_device),
},
}
)
container_diff = ivy.Container.cont_structural_diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.a.diff_1), np.array([[4]]))
assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([[[5]]]))
assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3]))
assert np.equal(ivy.to_numpy(container_diff.b.e.diff_1), np.array([3]))
container_diff_diff_only = ivy.Container.cont_structural_diff(
container_0, container_1, mode="diff_only"
)
assert container_diff_diff_only.cont_to_dict() == container_diff.cont_to_dict()
container_diff_same_only = ivy.Container.cont_structural_diff(
container_0, container_1, mode="same_only"
)
assert container_diff_same_only.cont_to_dict() == {}
# some different shapes
container_0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container_1 = Container(
{
"a": ivy.array([4], device=on_device),
"b": {
"c": ivy.array([[5]], device=on_device),
"d": ivy.array([6], device=on_device),
},
}
)
container_diff = ivy.Container.cont_structural_diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([5]))
assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3]))
container_diff_diff_only = ivy.Container.cont_structural_diff(
container_0, container_1, mode="diff_only"
)
assert "a" not in container_diff_diff_only
assert "b" in container_diff_diff_only
assert "c" in container_diff_diff_only["b"]
assert "d" not in container_diff_diff_only["b"]
container_diff_same_only = ivy.Container.cont_structural_diff(
container_0, container_1, mode="same_only"
)
assert "a" in container_diff_same_only
assert "b" in container_diff_same_only
assert "c" not in container_diff_same_only["b"]
assert "d" in container_diff_same_only["b"]
# all different keys
container_0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container_1 = Container(
{
"e": ivy.array([4], device=on_device),
"f": {
"g": ivy.array([5], device=on_device),
"h": ivy.array([6], device=on_device),
},
}
)
container_diff = ivy.Container.cont_structural_diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.b.diff_0.c), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.diff_0.d), np.array([3]))
assert np.equal(ivy.to_numpy(container_diff.e.diff_1), np.array([4]))
assert np.equal(ivy.to_numpy(container_diff.f.diff_1.g), np.array([5]))
assert np.equal(ivy.to_numpy(container_diff.f.diff_1.h), np.array([6]))
container_diff_diff_only = ivy.Container.cont_structural_diff(
container_0, container_1, mode="diff_only"
)
assert container_diff_diff_only.cont_to_dict() == container_diff.cont_to_dict()
container_diff_same_only = ivy.Container.cont_structural_diff(
container_0, container_1, mode="same_only"
)
assert container_diff_same_only.cont_to_dict() == {}
# some different keys
container_0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container_1 = Container(
{
"a": ivy.array([4], device=on_device),
"b": {
"c": ivy.array([5], device=on_device),
"e": ivy.array([6], device=on_device),
},
}
)
container_diff = ivy.Container.cont_structural_diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3]))
assert np.equal(ivy.to_numpy(container_diff.b.e.diff_1), np.array([6]))
container_diff_diff_only = ivy.Container.cont_structural_diff(
container_0, container_1, mode="diff_only"
)
assert "a" not in container_diff_diff_only
assert "b" in container_diff_diff_only
assert "c" not in container_diff_diff_only["b"]
assert "d" in container_diff_diff_only["b"]
assert "e" in container_diff_diff_only["b"]
container_diff_same_only = ivy.Container.cont_structural_diff(
container_0, container_1, mode="same_only"
)
assert "a" in container_diff_same_only
assert "b" in container_diff_same_only
assert "c" in container_diff_same_only["b"]
assert "d" not in container_diff_same_only["b"]
assert "e" not in container_diff_same_only["b"]
# all same
container_0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
)
container_1 = Container(
{
"a": ivy.array([4], device=on_device),
"b": {
"c": ivy.array([5], device=on_device),
"d": ivy.array([6], device=on_device),
},
}
)
container_diff = ivy.Container.cont_structural_diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3]))
container_diff_diff_only = ivy.Container.cont_structural_diff(
container_0, container_1, mode="diff_only"
)
assert container_diff_diff_only.cont_to_dict() == {}
container_diff_same_only = ivy.Container.cont_structural_diff(
container_0, container_1, mode="same_only"
)
assert container_diff_same_only.cont_to_dict() == container_diff.cont_to_dict()
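# round-trip a container through HDF5, covering appends to an existing
# file, slice-based loading, and file size queries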
def test_container_to_and_from_disk_as_hdf5(on_device):
if ivy.current_backend_str() == "tensorflow":
# container disk saving requires eager execution
pytest.skip()
save_filepath = "container_on_disk.hdf5"
dict_in_1 = {
"a": ivy.array([np.float32(1.0)], device=on_device),
"b": {
"c": ivy.array([np.float32(2.0)], device=on_device),
"d": ivy.array([np.float32(3.0)], device=on_device),
},
}
container1 = Container(dict_in_1)
dict_in_2 = {
"a": ivy.array([np.float32(1.0), np.float32(1.0)], device=on_device),
"b": {
"c": ivy.array([np.float32(2.0), np.float32(2.0)], device=on_device),
"d": ivy.array([np.float32(3.0), np.float32(3.0)], device=on_device),
},
}
container2 = Container(dict_in_2)
# saving
container1.cont_to_disk_as_hdf5(save_filepath, max_batch_size=2)
assert os.path.exists(save_filepath)
# loading
loaded_container = Container.cont_from_disk_as_hdf5(save_filepath, slice(1))
assert np.array_equal(ivy.to_numpy(loaded_container.a), ivy.to_numpy(container1.a))
assert np.array_equal(
ivy.to_numpy(loaded_container.b.c), ivy.to_numpy(container1.b.c)
)
assert np.array_equal(
ivy.to_numpy(loaded_container.b.d), ivy.to_numpy(container1.b.d)
)
# appending
container1.cont_to_disk_as_hdf5(save_filepath, max_batch_size=2, starting_index=1)
assert os.path.exists(save_filepath)
# loading after append
loaded_container = Container.cont_from_disk_as_hdf5(save_filepath)
assert np.array_equal(ivy.to_numpy(loaded_container.a), ivy.to_numpy(container2.a))
assert np.array_equal(
ivy.to_numpy(loaded_container.b.c), ivy.to_numpy(container2.b.c)
)
assert np.array_equal(
ivy.to_numpy(loaded_container.b.d), ivy.to_numpy(container2.b.d)
)
# load slice
loaded_sliced_container = Container.cont_from_disk_as_hdf5(
save_filepath, slice(1, 2)
)
assert np.array_equal(
ivy.to_numpy(loaded_sliced_container.a), ivy.to_numpy(container1.a)
)
assert np.array_equal(
ivy.to_numpy(loaded_sliced_container.b.c), ivy.to_numpy(container1.b.c)
)
assert np.array_equal(
ivy.to_numpy(loaded_sliced_container.b.d), ivy.to_numpy(container1.b.d)
)
# file size
file_size, batch_size = Container.h5_file_size(save_filepath)
assert file_size == 6 * np.dtype(np.float32).itemsize
assert batch_size == 2
os.remove(save_filepath)
def test_container_to_and_from_disk_as_json(on_device):
save_filepath = "container_on_disk.json"
dict_in = {
"a": 1.274e-7,
"b": {"c": True, "d": ivy.array([np.float32(3.0)], device=on_device)},
}
container = Container(dict_in)
# saving
container.cont_to_disk_as_json(save_filepath)
assert os.path.exists(save_filepath)
# loading
loaded_container = Container.cont_from_disk_as_json(save_filepath)
assert np.array_equal(loaded_container.a, container.a)
assert np.array_equal(loaded_container.b.c, container.b.c)
assert isinstance(loaded_container.b.d, str)
os.remove(save_filepath)
def test_container_to_and_from_disk_as_pickled(on_device):
save_filepath = "container_on_disk.pickled"
dict_in = {
"a": ivy.array([np.float32(1.0)], device=on_device),
"b": {
"c": ivy.array([np.float32(2.0)], device=on_device),
"d": ivy.array([np.float32(3.0)], device=on_device),
},
}
container = Container(dict_in)
    # paddle tensors can't be pickled directly, as mentioned
    # in the issue https://github.com/PaddlePaddle/Paddle/issues/41107
if ivy.backend == "paddle":
container = container.to_numpy()
# saving
container.cont_to_disk_as_pickled(save_filepath)
assert os.path.exists(save_filepath)
# loading
loaded_container = Container.cont_from_disk_as_pickled(save_filepath)
assert np.array_equal(ivy.to_numpy(loaded_container.a), ivy.to_numpy(container.a))
assert np.array_equal(
ivy.to_numpy(loaded_container.b.c), ivy.to_numpy(container.b.c)
)
assert np.array_equal(
ivy.to_numpy(loaded_container.b.d), ivy.to_numpy(container.b.d)
)
os.remove(save_filepath)
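# cont_to_dict unwraps single-element array leaves into native Python
# scalars, as exercised by the equality check below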
def test_container_to_dict(on_device):
container0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([True], device=on_device),
"d": {
"g": ivy.array([2.0], device=on_device),
"h": ivy.array([3], device=on_device),
},
},
}
)
res = ivy.Container.cont_to_dict(container0)
assert res == {"a": 1, "b": {"c": True, "d": {"g": 2.0, "h": 3}}}
def test_container_to_disk_shuffle_and_from_disk_as_hdf5(on_device):
if ivy.current_backend_str() == "tensorflow":
# container disk saving requires eager execution
pytest.skip()
save_filepath = "container_on_disk.hdf5"
dict_in = {
"a": ivy.array([1, 2, 3], device=on_device),
"b": {
"c": ivy.array([1, 2, 3], device=on_device),
"d": ivy.array([1, 2, 3], device=on_device),
},
}
container = Container(dict_in)
# saving
container.cont_to_disk_as_hdf5(save_filepath, max_batch_size=3)
assert os.path.exists(save_filepath)
# shuffling
Container.shuffle_h5_file(save_filepath)
# loading
container_shuffled = Container.cont_from_disk_as_hdf5(save_filepath, slice(3))
# testing
data = np.array([1, 2, 3])
random.seed(0)
random.shuffle(data)
assert (ivy.to_numpy(container_shuffled["a"]) == data).all()
assert (ivy.to_numpy(container_shuffled.a) == data).all()
assert (ivy.to_numpy(container_shuffled["b"]["c"]) == data).all()
assert (ivy.to_numpy(container_shuffled.b.c) == data).all()
assert (ivy.to_numpy(container_shuffled["b"]["d"]) == data).all()
assert (ivy.to_numpy(container_shuffled.b.d) == data).all()
os.remove(save_filepath)
def test_container_to_flat_list(on_device):
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
container = Container(dict_in)
container_flat_list = container.cont_to_flat_list()
for value, expected_value in zip(
container_flat_list,
[
ivy.array([1], device=on_device),
ivy.array([2], device=on_device),
ivy.array([3], device=on_device),
],
):
assert value == expected_value
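# cont_to_iterator yields (key_chain, value) pairs depth-first;
# leaf_keys_only replaces each full chain with just the leaf key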
@pytest.mark.parametrize("include_empty", [True, False])
def test_container_to_iterator(include_empty, on_device):
a_val = Container() if include_empty else ivy.array([1], device=on_device)
bc_val = Container() if include_empty else ivy.array([2], device=on_device)
bd_val = Container() if include_empty else ivy.array([3], device=on_device)
dict_in = {"a": a_val, "b": {"c": bc_val, "d": bd_val}}
container = Container(dict_in)
# with key chains
container_iterator = container.cont_to_iterator(include_empty=include_empty)
for (key_chain, value), expected in zip(
container_iterator, [("a", a_val), ("b/c", bc_val), ("b/d", bd_val)]
):
expected_key_chain = expected[0]
expected_value = expected[1]
assert key_chain == expected_key_chain
assert value is expected_value
# with leaf keys
container_iterator = container.cont_to_iterator(
leaf_keys_only=True, include_empty=include_empty
)
for (key_chain, value), expected in zip(
container_iterator, [("a", a_val), ("c", bc_val), ("d", bd_val)]
):
expected_key_chain = expected[0]
expected_value = expected[1]
assert key_chain == expected_key_chain
assert value is expected_value
@pytest.mark.parametrize("include_empty", [True, False])
def test_container_to_iterator_keys(include_empty, on_device):
a_val = Container() if include_empty else ivy.array([1], device=on_device)
bc_val = Container() if include_empty else ivy.array([2], device=on_device)
bd_val = Container() if include_empty else ivy.array([3], device=on_device)
dict_in = {"a": a_val, "b": {"c": bc_val, "d": bd_val}}
container = Container(dict_in)
# with key chains
container_iterator = container.cont_to_iterator_keys(include_empty=include_empty)
for key_chain, expected_key_chain in zip(container_iterator, ["a", "b/c", "b/d"]):
assert key_chain == expected_key_chain
# with leaf keys
container_iterator = container.cont_to_iterator_keys(
leaf_keys_only=True, include_empty=include_empty
)
for key, expected_key in zip(container_iterator, ["a", "c", "d"]):
assert key == expected_key
@pytest.mark.parametrize("include_empty", [True, False])
def test_container_to_iterator_values(include_empty, on_device):
a_val = Container() if include_empty else ivy.array([1], device=on_device)
bc_val = Container() if include_empty else ivy.array([2], device=on_device)
bd_val = Container() if include_empty else ivy.array([3], device=on_device)
dict_in = {"a": a_val, "b": {"c": bc_val, "d": bd_val}}
container = Container(dict_in)
# with key chains
container_iterator = container.cont_to_iterator_values(include_empty=include_empty)
for value, expected_value in zip(container_iterator, [a_val, bc_val, bd_val]):
assert value is expected_value
def test_container_to_nested_list(on_device):
container0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([True], device=on_device),
"d": {
"g": ivy.array([2.0], device=on_device),
"h": ivy.array([3], device=on_device),
},
},
}
)
res = ivy.Container.cont_to_nested_list(container0)
assert res == [1, [True, [2.0, 3]]]
def test_container_to_raw(on_device):
tuple_in = (
ivy.array([1], device=on_device),
(ivy.array([2], device=on_device), ivy.array([3], device=on_device)),
)
container = Container(tuple_in, types_to_iteratively_nest=[tuple])
raw = container.cont_to_raw()
assert np.allclose(ivy.to_numpy(raw[0]), np.array([1]))
assert np.allclose(ivy.to_numpy(raw[1][0]), np.array([2]))
assert np.allclose(ivy.to_numpy(raw[1][1]), np.array([3]))
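# cont_trim_key shortens a key to the target length by keeping evenly
# spaced characters, e.g. "abcdefg" -> "adg" for max_length=3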
def test_container_trim_key(on_device):
key = "abcdefg"
max_length = 3
trimmed_key = ivy.Container.cont_trim_key(key, max_length)
assert trimmed_key == "adg"
def test_container_try_kc(on_device):
cont = Container(
{
"a": ivy.array([0.0], device=on_device),
"b": {
"c": ivy.array([1.0], device=on_device),
"d": ivy.array([2.0], device=on_device),
},
}
)
assert cont.cont_try_kc("a") == cont.a
assert cont.cont_try_kc("b/c") == cont.b.c
assert cont.cont_try_kc("b/d") == cont.b.d
assert cont.cont_try_kc("b/e") is cont
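# cont_unify gathers per-device containers back onto a single device,
# here by concatenating leaves along axis 0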
def test_container_unify(on_device):
# on_devices and containers
on_devices = []
dev0 = on_device
on_devices.append(dev0)
conts = {}
conts[dev0] = Container(
{
"a": ivy.array([1], device=dev0),
"b": {"c": ivy.array([2], device=dev0), "d": ivy.array([3], device=dev0)},
}
)
if "gpu" in on_device and ivy.num_gpus() > 1:
idx = ivy.num_gpus() - 1
dev1 = on_device[:-1] + str(idx)
on_devices.append(dev1)
conts[dev1] = Container(
{
"a": ivy.array([4], device=dev1),
"b": {
"c": ivy.array([5], device=dev1),
"d": ivy.array([6], device=dev1),
},
}
)
# test
container_unified = ivy.Container.cont_unify(conts, dev0, "concat", 0)
assert np.allclose(ivy.to_numpy(container_unified.a[0]), np.array([1]))
assert np.allclose(ivy.to_numpy(container_unified.b.c[0]), np.array([2]))
assert np.allclose(ivy.to_numpy(container_unified.b.d[0]), np.array([3]))
if len(on_devices) > 1:
assert np.allclose(ivy.to_numpy(container_unified.a[1]), np.array([4]))
assert np.allclose(ivy.to_numpy(container_unified.b.c[1]), np.array([5]))
assert np.allclose(ivy.to_numpy(container_unified.b.d[1]), np.array([6]))
def test_container_unstack_conts(on_device):
dict_in = {
"a": ivy.array([[1], [2], [3]], device=on_device),
"b": {
"c": ivy.array([[2], [3], [4]], device=on_device),
"d": ivy.array([[3], [4], [5]], device=on_device),
},
}
container = Container(dict_in)
# without key_chains specification
container_unstacked = container.cont_unstack_conts(0)
for cont, a, bc, bd in zip(container_unstacked, [1, 2, 3], [2, 3, 4], [3, 4, 5]):
assert np.array_equal(ivy.to_numpy(cont["a"]), np.array([a]))
assert np.array_equal(ivy.to_numpy(cont.a), np.array([a]))
assert np.array_equal(ivy.to_numpy(cont["b"]["c"]), np.array([bc]))
assert np.array_equal(ivy.to_numpy(cont.b.c), np.array([bc]))
assert np.array_equal(ivy.to_numpy(cont["b"]["d"]), np.array([bd]))
assert np.array_equal(ivy.to_numpy(cont.b.d), np.array([bd]))
def test_container_with_default_key_color(on_device):
cont = Container(
{
"a": ivy.array([0.0], device=on_device),
"b": {
"c": ivy.array([1.0], device=on_device),
"d": ivy.array([2.0], device=on_device),
},
}
)
default_default_key_color = cont._default_key_color
id_cont = id(cont)
cont1 = cont.cont_with_default_key_color("red")
assert cont1._default_key_color == "red"
assert id(cont1) != id(cont)
assert cont._default_key_color == default_default_key_color
assert cont.b._default_key_color == default_default_key_color
assert cont._default_key_color != cont1._default_key_color
cont.cont_with_default_key_color("red", inplace=True)
assert cont._default_key_color == "red"
assert cont.b._default_key_color == "red"
assert id(cont) == id_cont
def test_container_with_entries_as_lists(on_device):
if ivy.current_backend_str() == "tensorflow":
# to_list() requires eager execution
pytest.skip()
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {"c": ivy.array([2.0], device=on_device), "d": "some string"},
}
container = Container(dict_in)
container_w_list_entries = container.cont_with_entries_as_lists()
for (key, value), expected_value in zip(
container_w_list_entries.cont_to_iterator(), [[1], [2.0], "some string"]
):
assert value == expected_value
def test_container_with_ivy_backend(on_device):
container0 = Container(
{
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([1], device=on_device),
"d": ivy.array([2], device=on_device),
},
}
)
id_container0 = id(container0)
container0 = ivy.Container.cont_with_ivy_backend(container0, "numpy")
assert container0.cont_config["ivyh"] == "numpy"
assert id_container0 != id(container0)
id_container0 = id(container0)
ivy.Container.cont_with_ivy_backend(container0, "torch", inplace=True)
assert container0.cont_config["ivyh"] == "torch"
assert id(container0) == id_container0
def test_container_with_key_length_limit(on_device):
cont = Container(
{
"a": ivy.array([0.0], device=on_device),
"b": {
"c": ivy.array([1.0], device=on_device),
"d": ivy.array([2.0], device=on_device),
},
}
)
default_key_length_limit = cont._key_length_limit
id_cont = id(cont)
cont1 = cont.cont_with_key_length_limit(5)
assert cont1._key_length_limit == 5
assert id(cont1) != id(cont)
assert cont._key_length_limit == default_key_length_limit
assert cont.b._key_length_limit == default_key_length_limit
assert cont._key_length_limit != cont1._key_length_limit
cont.cont_with_key_length_limit(5, inplace=True)
assert cont._key_length_limit == 5
assert cont.b._key_length_limit == 5
assert id(cont) == id_cont
def test_container_with_print_indent(on_device):
cont = Container(
{
"a": ivy.array([0.0], device=on_device),
"b": {
"c": ivy.array([1.0], device=on_device),
"d": ivy.array([2.0], device=on_device),
},
}
)
default_print_indent = cont._print_indent
id_cont = id(cont)
cont1 = cont.cont_with_print_indent(default_print_indent + 5)
assert cont1._print_indent == default_print_indent + 5
assert id(cont1) != id(cont)
assert cont._print_indent == default_print_indent
assert cont.b._print_indent == default_print_indent
assert cont._print_indent != cont1._print_indent
cont.cont_with_print_indent(default_print_indent + 5, inplace=True)
assert cont._print_indent == default_print_indent + 5
assert cont.b._print_indent == default_print_indent + 5
assert id(cont) == id_cont
def test_container_with_print_limit(on_device):
cont = Container(
{
"a": ivy.array([0.0], device=on_device),
"b": {
"c": ivy.array([1.0], device=on_device),
"d": ivy.array([2.0], device=on_device),
},
}
)
default_print_limit = cont._print_limit
id_cont = id(cont)
cont1 = cont.cont_with_print_limit(default_print_limit + 5)
assert cont1._print_limit == default_print_limit + 5
assert id(cont1) != id(cont)
assert cont._print_limit == default_print_limit
assert cont._print_limit != cont1._print_limit
cont.cont_with_print_limit(default_print_limit + 5, inplace=True)
assert cont._print_limit == default_print_limit + 5
assert cont.b._print_limit == default_print_limit + 5
assert id(cont) == id_cont
def test_container_with_print_line_spacing(on_device):
cont = Container(
{
"a": ivy.array([0.0], device=on_device),
"b": {
"c": ivy.array([1.0], device=on_device),
"d": ivy.array([2.0], device=on_device),
},
}
)
default_print_line_spacing = cont._print_line_spacing
id_cont = id(cont)
cont1 = cont.cont_with_print_line_spacing(default_print_line_spacing + 5)
assert cont1._print_line_spacing == default_print_line_spacing + 5
assert id(cont1) != id(cont)
assert cont._print_line_spacing == default_print_line_spacing
assert cont.b._print_line_spacing == default_print_line_spacing
assert cont._print_line_spacing != cont1._print_line_spacing
cont.cont_with_print_line_spacing(default_print_line_spacing + 5, inplace=True)
assert cont._print_line_spacing == default_print_line_spacing + 5
assert cont.b._print_line_spacing == default_print_line_spacing + 5
assert id(cont) == id_cont
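# Container registers as a JAX pytree, so tree_flatten should produce the
# same leaves as flattening the underlying dict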
def test_jax_pytree_compatibility(on_device):
if ivy.current_backend_str() != "jax":
pytest.skip()
# import
from jax.tree_util import tree_flatten
# dict in
dict_in = {
"a": ivy.array([1], device=on_device),
"b": {
"c": ivy.array([2], device=on_device),
"d": ivy.array([3], device=on_device),
},
}
# container
container = Container(dict_in)
# container flattened
cont_values = tree_flatten(container)[0]
# dict flattened
true_values = tree_flatten(dict_in)[0]
# assertion
for i, true_val in enumerate(true_values):
assert np.array_equal(ivy.to_numpy(cont_values[i]), ivy.to_numpy(true_val))
| ivy/ivy_tests/test_ivy/test_misc/test_container.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_misc/test_container.py",
"repo_id": "ivy",
"token_count": 61238
} | 64 |
"""Collection of tests for unified neural network activations."""
# global
from hypothesis import strategies as st, assume
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_method
# ELU
@handle_method(
method_tree="stateful.activations.ELU.__call__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
min_num_dims=2,
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
),
method_num_positional_args=helpers.num_positional_args(fn_name="ELU._forward"),
test_gradients=st.just(True),
alpha=helpers.floats(min_value=0.1, max_value=1),
)
def test_elu(
*,
dtype_and_x,
alpha,
test_gradients,
class_name,
method_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_input_dtypes=input_dtype,
method_input_dtypes=input_dtype,
init_all_as_kwargs_np={},
method_all_as_kwargs_np={"x": x[0], "alpha": alpha},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
test_gradients=test_gradients,
on_device=on_device,
)
# GEGLU
@handle_method(
method_tree="stateful.activations.GEGLU.__call__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
),
method_num_positional_args=helpers.num_positional_args(fn_name="GEGLU._forward"),
test_gradients=st.just(True),
)
def test_geglu(
*,
dtype_and_x,
test_gradients,
class_name,
method_name,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
    # last dim must be even; this could be replaced with a private helper
assume(x[0].shape[-1] % 2 == 0)
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_input_dtypes=input_dtype,
method_input_dtypes=input_dtype,
init_all_as_kwargs_np={},
method_all_as_kwargs_np={"inputs": x[0]},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
test_gradients=test_gradients,
on_device=on_device,
)
# GELU
@handle_method(
method_tree="stateful.activations.GELU.__call__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
large_abs_safety_factor=1,
small_abs_safety_factor=1,
safety_factor_scale="linear",
),
approximate=st.booleans(),
method_num_positional_args=helpers.num_positional_args(fn_name="GELU._forward"),
test_gradients=st.just(True),
)
def test_gelu(
*,
dtype_and_x,
approximate,
test_gradients,
method_name,
class_name,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={"approximate": approximate},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"x": x[0]},
class_name=class_name,
method_name=method_name,
atol_=1e-2,
rtol_=1e-2,
test_gradients=test_gradients,
on_device=on_device,
)
# Hardswish
@handle_method(
method_tree="stateful.activations.Hardswish.__call__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
min_num_dims=2,
),
method_num_positional_args=helpers.num_positional_args(
fn_name="Hardswish._forward"
),
test_gradients=st.just(True),
)
def test_hardswish(
*,
dtype_and_x,
test_gradients,
class_name,
method_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_input_dtypes=input_dtype,
method_input_dtypes=input_dtype,
init_all_as_kwargs_np={},
method_all_as_kwargs_np={"x": x[0]},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
test_gradients=test_gradients,
on_device=on_device,
)
@handle_method(
method_tree="stateful.activations.LeakyReLU.__call__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(
"float_and_complex", full=False, key="leaky_relu"
),
large_abs_safety_factor=16,
small_abs_safety_factor=16,
safety_factor_scale="log",
),
alpha=st.floats(min_value=-1e-4, max_value=1e-4),
method_num_positional_args=helpers.num_positional_args(
fn_name="LeakyReLU._forward"
),
test_gradients=st.just(True),
)
def test_leaky_relu(
*,
dtype_and_x,
alpha,
test_gradients,
class_name,
method_name,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_input_dtypes=input_dtype,
method_input_dtypes=input_dtype,
init_all_as_kwargs_np={"alpha": alpha},
method_all_as_kwargs_np={"x": x[0]},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
test_gradients=test_gradients,
on_device=on_device,
)
# LogSoftmax
@handle_method(
method_tree="stateful.activations.LogSoftmax.__call__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
min_num_dims=2,
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
),
axis=helpers.ints(min_value=-1, max_value=0),
method_num_positional_args=helpers.num_positional_args(
fn_name="LogSoftmax._forward"
),
test_gradients=st.just(True),
)
def test_log_softmax(
*,
dtype_and_x,
axis,
test_gradients,
class_name,
method_name,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_input_dtypes=input_dtype,
method_input_dtypes=input_dtype,
init_all_as_kwargs_np={"axis": axis},
method_all_as_kwargs_np={"x": x[0]},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
test_gradients=test_gradients,
on_device=on_device,
)
# Logit
@handle_method(
method_tree="stateful.activations.Logit.__call__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
min_num_dims=2,
),
method_num_positional_args=helpers.num_positional_args(fn_name="Logit._forward"),
eps=helpers.floats(min_value=1e-4, max_value=1e-2),
test_gradients=st.just(True),
)
def test_logit(
*,
dtype_and_x,
eps,
test_gradients,
class_name,
method_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_input_dtypes=input_dtype,
method_input_dtypes=input_dtype,
init_all_as_kwargs_np={},
method_all_as_kwargs_np={"x": x[0], "eps": eps},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
test_gradients=test_gradients,
on_device=on_device,
)
# Logsigmoid
@handle_method(
method_tree="stateful.activations.LogSigmoid.__call__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
min_num_dims=2,
),
method_num_positional_args=helpers.num_positional_args(
fn_name="LogSigmoid._forward"
),
test_gradients=st.just(True),
)
def test_logsigmoid(
*,
dtype_and_x,
test_gradients,
class_name,
method_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_input_dtypes=input_dtype,
method_input_dtypes=input_dtype,
init_all_as_kwargs_np={},
method_all_as_kwargs_np={"x": x[0]},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
test_gradients=test_gradients,
on_device=on_device,
)
@handle_method(
method_tree="stateful.activations.Mish.__call__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
),
method_num_positional_args=helpers.num_positional_args(fn_name="Mish._forward"),
test_gradients=st.just(True),
)
def test_mish(
*,
dtype_and_x,
test_gradients,
class_name,
method_name,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_input_dtypes=input_dtype,
method_input_dtypes=input_dtype,
init_all_as_kwargs_np={},
method_all_as_kwargs_np={"x": x[0]},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
test_gradients=test_gradients,
on_device=on_device,
)
# PReLU
@handle_method(
method_tree="stateful.activations.PReLU.__call__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
min_num_dims=2,
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
),
method_num_positional_args=helpers.num_positional_args(fn_name="PReLU._forward"),
test_gradients=st.just(True),
)
def test_prelu(
*,
dtype_and_x,
test_gradients,
class_name,
method_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_input_dtypes=input_dtype,
method_input_dtypes=input_dtype,
init_all_as_kwargs_np={},
method_all_as_kwargs_np={"x": x[0], "slope": x[1]},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
test_gradients=test_gradients,
on_device=on_device,
)
@handle_method(
method_tree="stateful.activations.ReLU.__call__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
),
method_num_positional_args=helpers.num_positional_args(fn_name="ReLU._forward"),
test_gradients=st.just(True),
)
def test_relu(
*,
dtype_and_x,
test_gradients,
class_name,
method_name,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_input_dtypes=input_dtype,
method_input_dtypes=input_dtype,
init_all_as_kwargs_np={},
method_all_as_kwargs_np={"x": x[0]},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
test_gradients=test_gradients,
on_device=on_device,
)
# ReLU6
@handle_method(
method_tree="stateful.activations.ReLU6.__call__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
min_num_dims=2,
),
method_num_positional_args=helpers.num_positional_args(fn_name="ReLU6._forward"),
test_gradients=st.just(True),
)
def test_relu6(
*,
dtype_and_x,
test_gradients,
class_name,
method_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_input_dtypes=input_dtype,
method_input_dtypes=input_dtype,
init_all_as_kwargs_np={},
method_all_as_kwargs_np={"x": x[0]},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
test_gradients=test_gradients,
on_device=on_device,
)
# SeLU
@handle_method(
method_tree="stateful.activations.SeLU.__call__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
min_num_dims=2,
),
method_num_positional_args=helpers.num_positional_args(fn_name="SeLU._forward"),
test_gradients=st.just(True),
)
def test_selu(
*,
dtype_and_x,
test_gradients,
class_name,
method_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_input_dtypes=input_dtype,
method_input_dtypes=input_dtype,
init_all_as_kwargs_np={},
method_all_as_kwargs_np={"x": x[0]},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
test_gradients=test_gradients,
on_device=on_device,
)
# Sigmoid
@handle_method(
method_tree="stateful.activations.Sigmoid.__call__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
min_num_dims=2,
),
method_num_positional_args=helpers.num_positional_args(fn_name="Sigmoid._forward"),
test_gradients=st.just(True),
)
def test_sigmoid(
*,
dtype_and_x,
test_gradients,
class_name,
method_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_input_dtypes=input_dtype,
method_input_dtypes=input_dtype,
init_all_as_kwargs_np={},
method_all_as_kwargs_np={"x": x[0]},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
test_gradients=test_gradients,
on_device=on_device,
)
@handle_method(
method_tree="stateful.activations.SiLU.__call__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
),
method_num_positional_args=helpers.num_positional_args(fn_name="SiLU._forward"),
test_gradients=st.just(True),
)
def test_silu(
*,
dtype_and_x,
test_gradients,
class_name,
method_name,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_input_dtypes=input_dtype,
method_input_dtypes=input_dtype,
init_all_as_kwargs_np={},
method_all_as_kwargs_np={"x": x[0]},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
test_gradients=test_gradients,
on_device=on_device,
)
@handle_method(
method_tree="stateful.activations.Softmax.__call__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
min_num_dims=1,
large_abs_safety_factor=10,
small_abs_safety_factor=10,
safety_factor_scale="log",
),
axis=helpers.ints(min_value=-1, max_value=0),
method_num_positional_args=helpers.num_positional_args(fn_name="Softmax._forward"),
test_gradients=st.just(True),
)
def test_softmax(
*,
dtype_and_x,
axis,
test_gradients,
class_name,
method_name,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_input_dtypes=input_dtype,
method_input_dtypes=input_dtype,
init_all_as_kwargs_np={},
method_all_as_kwargs_np={"x": x[0], "axis": axis},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
test_gradients=test_gradients,
on_device=on_device,
)
@handle_method(
method_tree="stateful.activations.Softplus.__call__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
),
beta=st.one_of(helpers.number(min_value=0.1, max_value=10), st.none()),
threshold=st.one_of(helpers.number(min_value=0.1, max_value=30), st.none()),
method_num_positional_args=helpers.num_positional_args(fn_name="Softplus._forward"),
test_gradients=st.just(True),
)
def test_softplus(
*,
dtype_and_x,
beta,
threshold,
test_gradients,
class_name,
method_name,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_input_dtypes=input_dtype,
method_input_dtypes=input_dtype,
init_all_as_kwargs_np={},
method_all_as_kwargs_np={"x": x[0], "beta": beta, "threshold": threshold},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
test_gradients=test_gradients,
on_device=on_device,
)
# Tanh
@handle_method(
method_tree="stateful.activations.Tanh.__call__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
min_num_dims=2,
),
method_num_positional_args=helpers.num_positional_args(fn_name="Tanh._forward"),
test_gradients=st.just(True),
)
def test_tanh(
*,
dtype_and_x,
test_gradients,
class_name,
method_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_input_dtypes=input_dtype,
method_input_dtypes=input_dtype,
init_all_as_kwargs_np={},
method_all_as_kwargs_np={"x": x[0]},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
test_gradients=test_gradients,
on_device=on_device,
)
| ivy/ivy_tests/test_ivy/test_stateful/test_activations.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_stateful/test_activations.py",
"repo_id": "ivy",
"token_count": 11062
} | 65 |
import sys
from get_all_tests import BACKENDS
def main():
    # expects a single test entry as the first CLI argument
    if len(sys.argv) < 2:
        return
    test = sys.argv[1]
    with open("tests_to_run", "w") as f:
        if "," in test:
            # already a "<test>,<backend>" pair; write it through unchanged
            f.write(test + "\n")
        else:
            # pair the test with every supported backend
            for backend in BACKENDS:
                f.write(f"{test},{backend}\n")
if __name__ == "__main__":
main()
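# Example invocation (hypothetical test path, shown for illustration only):
#   python setup_tests.py "ivy_tests/test_ivy/test_stateful/test_activations.py::test_relu"
# If the argument contains no comma, one "<test>,<backend>" line is written to
# the `tests_to_run` file for every backend in BACKENDS; otherwise the argument
# is assumed to already be a "<test>,<backend>" pair and is written unchanged.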
| ivy/scripts/setup_tests/setup_tests.py/0 | {
"file_path": "ivy/scripts/setup_tests/setup_tests.py",
"repo_id": "ivy",
"token_count": 188
} | 66 |
<component name="InspectionProjectProfileManager">
<settings>
<option name="PROJECT_PROFILE" value="Default" />
<option name="USE_PROJECT_PROFILE" value="false" />
<version value="1.0" />
</settings>
</component>
| ivy/.idea/inspectionProfiles/profiles_settings.xml/0 | {
"file_path": "ivy/.idea/inspectionProfiles/profiles_settings.xml",
"repo_id": "ivy",
"token_count": 78
} | 0 |
> 🚀 We are granting access to **Ivy's Tracer and Transpiler**
> to all of our users, [sign up on our console](https://console.unify.ai/) if you
> want to test them out!
<img class="only-dark" width="100%" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logo_dark.png#gh-dark-mode-only"/>
<img class="only-light" width="100%" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logo.png?raw=true#gh-light-mode-only"/>
------------------------------------------------------------------------
<div style="display: block;" align="center">
<a href="https://unify.ai/">
<img class="dark-light" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/website_button.svg">
</a>
<img class="dark-light" width="5%" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logos/supported/empty.png">
<a href="https://unify.ai/docs/ivy">
<img class="dark-light" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/docs_button.svg">
</a>
<img class="dark-light" width="5%" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logos/supported/empty.png">
<a href="https://unify.ai/demos">
<img class="dark-light" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/examples_button.svg">
</a>
<img class="dark-light" width="5%" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logos/supported/empty.png">
<a href="https://unify.ai/docs/ivy/overview/design.html">
<img class="dark-light" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/design_button.svg">
</a>
<img class="dark-light" width="5%" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logos/supported/empty.png">
<a href="https://unify.ai/docs/ivy/overview/faq.html">
<img class="dark-light" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/faq_button.svg">
</a>
</div>
------------------------------------------------------------------------
# Status
<div>
<a href="https://github.com/unifyai/ivy/issues">
<img class="dark-light" style="padding-right: 4px; padding-bottom: 4px;" src="https://img.shields.io/github/issues/unifyai/ivy">
</a>
<a href="https://github.com/unifyai/ivy/network/members">
<img class="dark-light" style="padding-right: 4px; padding-bottom: 4px;" src="https://img.shields.io/github/forks/unifyai/ivy">
</a>
<a href="https://github.com/unifyai/ivy/stargazers">
<img class="dark-light" style="padding-right: 4px; padding-bottom: 4px;" src="https://img.shields.io/github/stars/unifyai/ivy">
</a>
<a href="https://github.com/unifyai/ivy/pulls">
<img class="dark-light" style="padding-right: 4px; padding-bottom: 4px;" src="https://img.shields.io/badge/PRs-welcome-brightgreen.svg">
</a>
<a href="https://pypi.org/project/ivy">
<img class="dark-light" style="padding-right: 4px; padding-bottom: 4px;" src="https://badge.fury.io/py/ivy.svg">
</a>
<a href="https://github.com/unifyai/ivy/actions?query=workflow%3Adocs">
<img class="dark-light" style="padding-right: 4px; padding-bottom: 4px;" src="https://github.com/unifyai/ivy/actions/workflows/docs.yml/badge.svg">
</a>
<a href="https://github.com/unifyai/ivy/actions?query=workflow%3Atest-ivy">
<img class="dark-light" style="padding-right: 4px; padding-bottom: 4px;" src="https://github.com/unifyai/ivy/actions/workflows/intelligent-tests.yml/badge.svg">
</a>
<a href="https://discord.gg/sXyFF8tDtm">
<img class="dark-light" style="padding-right: 4px; padding-bottom: 4px;" src="https://img.shields.io/discord/799879767196958751?color=blue&label=%20&logo=discord&logoColor=white">
</a>
</div>
<br clear="all" />
------------------------------------------------------------------------
# Unified AI
<div style="display: block;" align="center">
<div>
<a href="https://jax.readthedocs.io">
<img class="dark-light" width="10%" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logos/supported/jax_logo.png">
</a>
<img class="dark-light" width="5%" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logos/supported/empty.png">
<img class="dark-light" width="5%" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logos/supported/empty.png">
<a href="https://www.tensorflow.org">
<img class="dark-light" width="10%" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logos/supported/tensorflow_logo.png">
</a>
<img class="dark-light" width="5%" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logos/supported/empty.png">
<img class="dark-light" width="5%" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logos/supported/empty.png">
<a href="https://pytorch.org">
<img class="dark-light" width="10%" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logos/supported/pytorch_logo.png">
</a>
<img class="dark-light" width="5%" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logos/supported/empty.png">
<img class="dark-light" width="5%" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logos/supported/empty.png">
<a href="https://numpy.org">
<img class="dark-light" width="10%" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logos/supported/numpy_logo.png">
</a>
</div>
</div>
<br clear="all" />
------------------------------------------------------------------------
Ivy is an open-source machine learning framework that
enables you to:
- 🔥 **Autotune your model**: Automatically find the optimal framework, compiler infrastructure and hardware for your specific use case using `ivy.autotune`.
- 🔄 **Convert code into any framework**: Use and build on top of any model, library, or device by converting any code from one framework to another using `ivy.transpile`.
- ⚒️ **Write framework-agnostic code**: Write your code once in ivy and then choose the most appropriate ML framework as the backend to leverage all the benefits and tools.
[Join our growing community](https://discord.com/invite/sXyFF8tDtm) 🌍 to connect with people using Ivy. **Let's** [unify.ai](https://unify.ai) **together 🦾**
------------------------------------------------------------------------
# Getting started
The best way to get familiar with Ivy is to go through the [Demos](https://unify.ai/docs/ivy/demos/examples_and_demos.html), a good starting point is [Learn The Basics](https://unify.ai/docs/ivy/demos/learn_the_basics.html).
The most important notebooks are:
- [How to convert your code between frameworks?](https://unify.ai/docs/ivy/demos/learn_the_basics/04_transpile_code.html)
- [How to write framework-agnostic code?](https://unify.ai/docs/ivy/demos/learn_the_basics/01_write_ivy_code.html)
- Accelerate your development (WIP)
- Autotune and optimize models (WIP)
------------------------------------------------------------------------
## Installing ivy
There are various ways to use Ivy, depending on your preferred
environment:
### Installing using pip
The easiest way to set up Ivy is to install it using pip with the
following command:
``` bash
pip install ivy
```
or alternatively:
``` bash
python3 -m pip install ivy
```
<details>
<summary>Docker</summary>
If you prefer to use containers, we also have pre-built Docker images
with all the supported frameworks and some relevant packages already
installed, which you can pull from:
``` bash
docker pull unifyai/ivy:latest
```
If you are working on a GPU device, you can pull from:
``` bash
docker pull unifyai/ivy:latest-gpu
```
</details>
<details>
<summary>From Source</summary>
You can also install Ivy from source if you want to take advantage of
the latest changes, but we can't ensure everything will work as
expected. :sweat_smile:
``` bash
git clone https://github.com/unifyai/ivy.git
cd ivy
pip install --user -e .
```
or alternatively, for the last step:
``` bash
python3 -m pip install --user -e .
```
If you want to set up testing and various frameworks it's probably best
to check out the [Contributing - Setting
Up](https://unify.ai/docs/ivy/overview/contributing/setting_up.html#setting-up)
page, where OS-specific and IDE-specific instructions and video
tutorials to do so are available!
</details>
------------------------------------------------------------------------
## Using Ivy
After installing Ivy, you can start using it straight away, for example:
<details>
<summary><b>Transpiling any code from one framework to another</b></summary>
``` python
import ivy
import torch
import jax
def jax_fn(x):
a = jax.numpy.dot(x, x)
b = jax.numpy.mean(x)
return x * a + b
jax_x = jax.numpy.array([1, 2, 3])
torch_x = torch.tensor([1, 2, 3])
torch_fn = ivy.transpile(jax_fn, source="jax", to="torch", args=(jax_x,))
ret = torch_fn(torch_x)
```
</details>
<details>
<summary><b>Running your code with any backend</b></summary>
``` python
import ivy
import torch
import jax
ivy.set_backend("jax")
x = jax.numpy.array([1, 2, 3])
y = jax.numpy.array([3, 2, 1])
z = ivy.add(x, y)
ivy.set_backend('torch')
x = torch.tensor([1, 2, 3])
y = torch.tensor([3, 2, 1])
z = ivy.add(x, y)
```
</details>
------------------------------------------------------------------------
# Documentation
You can find Ivy's documentation on the [Docs page](https://unify.ai/docs/ivy/), which includes:
- [Motivation](https://unify.ai/docs/ivy/overview/background.html): This contextualizes the problem Ivy is trying to solve by going over
- The current [ML Explosion](https://unify.ai/docs/ivy/overview/background/ml_explosion.html#ml-explosion).
- Explaining why it is important [to solve this problem](https://unify.ai/docs/ivy/overview/background/why_unify.html#why-unify).
- Explaining how we adhere to existing [standards](https://unify.ai/docs/ivy/overview/background/standardization.html#standardization) to make this happen.
- [Related Work](https://unify.ai/docs/ivy/overview/related_work.html): Which paints a picture of the role Ivy plays in the ML stack, comparing it to other existing solutions in terms of functionalities and abstraction level.
- [Design](https://unify.ai/docs/ivy/overview/design.html): A user-focused guide about the design decision behind the architecture and the main building blocks of Ivy.
- [Deep Dive](https://unify.ai/docs/ivy/overview/deep_dive.html): Which delves deeper into the implementation details of Ivy and is oriented towards potential contributors to the code base.
------------------------------------------------------------------------
# Examples
The [Examples page](https://unify.ai/demos/) features a wide range of
demos and tutorials showcasing the functionalities of Ivy along with
multiple use cases, but feel free to check out some shorter
framework-specific examples here ⬇️
<details>
<summary><b>I'm using PyTorch <img class="dark-light" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logos/supported/torch_small_logo.png"></b></summary>
<blockquote>You can use Ivy to get PyTorch code from:
<details>
<summary>Any model</summary>
<blockquote>
<details>
<summary>From TensorFlow</summary>
``` python
import ivy
import torch
import tensorflow as tf
# Get a pretrained keras model
eff_encoder = tf.keras.applications.efficientnet_v2.EfficientNetV2B0(
include_top=False, weights="imagenet", input_shape=(224, 224, 3)
)
# Transpile it into a torch.nn.Module with the corresponding parameters
noise = tf.random.normal(shape=(1, 224, 224, 3))
torch_eff_encoder = ivy.transpile(eff_encoder, to="torch", args=(noise,))
# Build a classifier using the transpiled encoder
class Classifier(torch.nn.Module):
def __init__(self, num_classes=20):
super().__init__()
self.encoder = torch_eff_encoder
self.fc = torch.nn.Linear(1280, num_classes)
def forward(self, x):
x = self.encoder(x)
return self.fc(x)
# Initialize a trainable, customizable, torch.nn.Module
classifier = Classifier()
ret = classifier(torch.rand((1, 224, 224, 3)))
```
</details>
<details>
<summary>From JAX</summary>
``` python
import ivy
import jax
import torch
# Get a pretrained haiku model
# https://unify.ai/demos/scripts/deepmind_perceiver_io.py
from deepmind_perceiver_io import key, perceiver_backbone
# Transpile it into a torch.nn.Module with the corresponding parameters
dummy_input = jax.random.uniform(key, shape=(1, 3, 224, 224))
params = perceiver_backbone.init(rng=key, images=dummy_input)
backbone = ivy.transpile(
perceiver_backbone, to="torch", params_v=params, kwargs={"images": dummy_input}
)
# Build a classifier using the transpiled backbone
class PerceiverIOClassifier(torch.nn.Module):
def __init__(self, num_classes=20):
super().__init__()
self.backbone = backbone
self.max_pool = torch.nn.MaxPool2d((512, 1))
self.flatten = torch.nn.Flatten()
self.fc = torch.nn.Linear(1024, num_classes)
def forward(self, x):
x = self.backbone(images=x)
x = self.flatten(self.max_pool(x))
return self.fc(x)
# Initialize a trainable, customizable, torch.nn.Module
classifier = PerceiverIOClassifier()
ret = classifier(torch.rand((1, 3, 224, 224)))
```
</details>
</blockquote>
</details>
<details>
<summary>Any library</summary>
<blockquote>
<details>
<summary>From Tensorflow</summary>
``` python
import ivy
import torch
import os
os.environ["SM_FRAMEWORK"] = "tf.keras"
import segmentation_models as sm
# transpile sm from tensorflow to torch
torch_sm = ivy.transpile(sm, source="tensorflow", to="torch")
# get some image-like arrays
output = torch.rand((1, 3, 512, 512))
target = torch.rand((1, 3, 512, 512))
# and use the transpiled version of any function from the library!
out = torch_sm.metrics.iou_score(output, target)
```
</details>
<details>
<summary>From JAX</summary>
``` python
import ivy
import rax
import torch
# transpile rax from jax to torch
torch_rax = ivy.transpile(rax, source="jax", to="torch")
# get some arrays
scores = torch.tensor([2.2, 1.3, 5.4])
labels = torch.tensor([1.0, 0.0, 0.0])
# and use the transpiled version of any function from the library!
out = torch_rax.poly1_softmax_loss(scores, labels)
```
</details>
<details>
<summary>From NumPy</summary>
``` python
import ivy
import torch
import madmom
# transpile madmom from numpy to torch
torch_madmom = ivy.transpile(madmom, source="numpy", to="torch")
# get some arrays
freqs = torch.arange(20) * 10
# and use the transpiled version of any function from the library!
out = torch_madmom.audio.filters.hz2midi(freqs)
```
</details>
</blockquote>
</details>
<details>
<summary>Any function</summary>
<blockquote>
<details>
<summary>From Tensorflow</summary>
``` python
import ivy
import tensorflow as tf
import torch
def loss(predictions, targets):
return tf.sqrt(tf.reduce_mean(tf.square(predictions - targets)))
# transpile any function from tf to torch
torch_loss = ivy.transpile(loss, source="tensorflow", to="torch")
# get some arrays
p = torch.tensor([3.0, 2.0, 1.0])
t = torch.tensor([0.0, 0.0, 0.0])
# and use the transpiled version!
out = torch_loss(p, t)
```
</details>
<details>
<summary>From JAX</summary>
``` python
import ivy
import jax.numpy as jnp
import torch
def loss(predictions, targets):
return jnp.sqrt(jnp.mean((predictions - targets) ** 2))
# transpile any function from jax to torch
torch_loss = ivy.transpile(loss, source="jax", to="torch")
# get some arrays
p = torch.tensor([3.0, 2.0, 1.0])
t = torch.tensor([0.0, 0.0, 0.0])
# and use the transpiled version!
out = torch_loss(p, t)
```
</details>
<details>
<summary>From NumPy</summary>
``` python
import ivy
import numpy as np
import torch
def loss(predictions, targets):
return np.sqrt(np.mean((predictions - targets) ** 2))
# transpile any function from numpy to torch
torch_loss = ivy.transpile(loss, source="numpy", to="torch")
# get some arrays
p = torch.tensor([3.0, 2.0, 1.0])
t = torch.tensor([0.0, 0.0, 0.0])
# and use the transpiled version!
out = torch_loss(p, t)
```
</details>
</blockquote>
</details>
</blockquote>
</details>
<details>
<summary><b>I'm using TensorFlow <img class="dark-light" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logos/supported/tf_small_logo.png"></b></summary>
<blockquote>You can use Ivy to get TensorFlow code from:
<details>
<summary>Any model</summary>
<blockquote>
<details>
<summary>From PyTorch</summary>
``` python
import ivy
import torch
import timm
import tensorflow as tf
# Get a pretrained pytorch model
mlp_encoder = timm.create_model("mixer_b16_224", pretrained=True, num_classes=0)
# Transpile it into a keras.Model with the corresponding parameters
noise = torch.randn(1, 3, 224, 224)
mlp_encoder = ivy.transpile(mlp_encoder, to="tensorflow", args=(noise,))
# Build a classifier using the transpiled encoder
class Classifier(tf.keras.Model):
def __init__(self):
super().__init__()
self.encoder = mlp_encoder
self.output_dense = tf.keras.layers.Dense(units=1000, activation="softmax")
def call(self, x):
x = self.encoder(x)
return self.output_dense(x)
# Transform the classifier and use it as a standard keras.Model
x = tf.random.normal(shape=(1, 3, 224, 224))
model = Classifier()
ret = model(x)
```
</details>
<details>
<summary>From JAX</summary>
``` python
import ivy
import jax
import tensorflow as tf
# Get a pretrained haiku model
# https://unify.ai/demos/scripts/deepmind_perceiver_io.py
from deepmind_perceiver_io import key, perceiver_backbone
# Transpile it into a tf.keras.Model with the corresponding parameters
dummy_input = jax.random.uniform(key, shape=(1, 3, 224, 224))
params = perceiver_backbone.init(rng=key, images=dummy_input)
backbone = ivy.transpile(
perceiver_backbone, to="tensorflow", params_v=params, args=(dummy_input,)
)
# Build a classifier using the transpiled backbone
class PerceiverIOClassifier(tf.keras.Model):
def __init__(self, num_classes=20):
super().__init__()
self.backbone = backbone
self.max_pool = tf.keras.layers.MaxPooling1D(pool_size=512)
self.flatten = tf.keras.layers.Flatten()
self.fc = tf.keras.layers.Dense(num_classes)
def call(self, x):
x = self.backbone(x)
x = self.flatten(self.max_pool(x))
return self.fc(x)
# Initialize a trainable, customizable, tf.keras.Model
x = tf.random.normal(shape=(1, 3, 224, 224))
classifier = PerceiverIOClassifier()
ret = classifier(x)
```
</details>
</blockquote>
</details>
<details>
<summary>Any library</summary>
<blockquote>
<details>
<summary>From PyTorch</summary>
``` python
import ivy
import kornia
import requests
import numpy as np
import tensorflow as tf
from PIL import Image
# transpile kornia from torch to tensorflow
tf_kornia = ivy.transpile(kornia, source="torch", to="tensorflow")
# get an image
url = "http://images.cocodataset.org/train2017/000000000034.jpg"
raw_img = Image.open(requests.get(url, stream=True).raw)
# convert it to the format expected by kornia
img = np.array(raw_img)
img = tf.transpose(tf.constant(img), (2, 0, 1))
img = tf.expand_dims(img, 0) / 255
# and use the transpiled version of any function from the library!
out = tf_kornia.enhance.sharpness(img, 5)
```
</details>
<details>
<summary>From JAX</summary>
``` python
import ivy
import rax
import tensorflow as tf
# transpile rax from jax to tensorflow
tf_rax = ivy.transpile(rax, source="jax", to="tensorflow")
# get some arrays
scores = tf.constant([2.2, 1.3, 5.4])
labels = tf.constant([1.0, 0.0, 0.0])
# and use the transpiled version of any function from the library!
out = tf_rax.poly1_softmax_loss(scores, labels)
```
</details>
<details>
<summary>From NumPy</summary>
``` python
import ivy
import madmom
import tensorflow as tf
# transpile madmom from numpy to tensorflow
tf_madmom = ivy.transpile(madmom, source="numpy", to="tensorflow")
# get some arrays
freqs = tf.range(20) * 10
# and use the transpiled version of any function from the library!
out = tf_madmom.audio.filters.hz2midi(freqs)
```
</details>
</blockquote>
</details>
<details>
<summary>Any function</summary>
<blockquote>
<details>
<summary>From PyTorch</summary>
``` python
import ivy
import torch
import tensorflow as tf
def loss(predictions, targets):
return torch.sqrt(torch.mean((predictions - targets) ** 2))
# transpile any function from torch to tensorflow
tf_loss = ivy.transpile(loss, source="torch", to="tensorflow")
# get some arrays
p = tf.constant([3.0, 2.0, 1.0])
t = tf.constant([0.0, 0.0, 0.0])
# and use the transpiled version!
out = tf_loss(p, t)
```
</details>
<details>
<summary>From JAX</summary>
``` python
import ivy
import jax.numpy as jnp
import tensorflow as tf
def loss(predictions, targets):
return jnp.sqrt(jnp.mean((predictions - targets) ** 2))
# transpile any function from jax to tensorflow
tf_loss = ivy.transpile(loss, source="jax", to="tensorflow")
# get some arrays
p = tf.constant([3.0, 2.0, 1.0])
t = tf.constant([0.0, 0.0, 0.0])
# and use the transpiled version!
out = tf_loss(p, t)
```
</details>
<details>
<summary>From NumPy</summary>
``` python
import ivy
import numpy as np
import tensorflow as tf
def loss(predictions, targets):
return np.sqrt(np.mean((predictions - targets) ** 2))
# transpile any function from numpy to tensorflow
tf_loss = ivy.transpile(loss, source="numpy", to="tensorflow")
# get some arrays
p = tf.constant([3.0, 2.0, 1.0])
t = tf.constant([0.0, 0.0, 0.0])
# and use the transpiled version!
out = tf_loss(p, t)
```
</details>
</blockquote>
</details>
</blockquote>
</details>
<details>
<summary><b>I'm using Jax <img class="dark-light" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logos/supported/jax_small_logo.png"></b></summary>
<blockquote>You can use Ivy to get JAX code from:
<details>
<summary>Any model</summary>
<blockquote>
<details>
<summary>From PyTorch</summary>
``` python
import ivy
import timm
import torch
import jax
import haiku as hk
# Get a pretrained pytorch model
mlp_encoder = timm.create_model("mixer_b16_224", pretrained=True, num_classes=0)
# Transpile it into a hk.Module with the corresponding parameters
noise = torch.randn(1, 3, 224, 224)
mlp_encoder = ivy.transpile(mlp_encoder, to="jax", args=(noise,))
# Build a classifier using the transpiled encoder
class Classifier(hk.Module):
def __init__(self, num_classes=1000):
super().__init__()
self.encoder = mlp_encoder()
self.fc = hk.Linear(output_size=num_classes, with_bias=True)
def __call__(self, x):
x = self.encoder(x)
x = self.fc(x)
return x
def _forward_classifier(x):
module = Classifier()
return module(x)
# Transform the classifier and use it as a standard hk.Module
rng_key = jax.random.PRNGKey(42)
x = jax.random.uniform(key=rng_key, shape=(1, 3, 224, 224), dtype=jax.numpy.float32)
forward_classifier = hk.transform(_forward_classifier)
params = forward_classifier.init(rng=rng_key, x=x)
ret = forward_classifier.apply(params, None, x)
```
</details>
<details>
<summary>From TensorFlow</summary>
``` python
import ivy
import jax
import haiku as hk
import tensorflow as tf
# Get a pretrained keras model
eff_encoder = tf.keras.applications.efficientnet_v2.EfficientNetV2B0(
include_top=False, weights="imagenet", input_shape=(224, 224, 3)
)
# Transpile it into a hk.Module with the corresponding parameters
noise = tf.random.normal(shape=(1, 224, 224, 3))
hk_eff_encoder = ivy.transpile(eff_encoder, to="jax", args=(noise,))
# Build a classifier using the transpiled encoder
class Classifier(hk.Module):
def __init__(self, num_classes=1000):
super().__init__()
self.encoder = hk_eff_encoder()
self.fc = hk.Linear(output_size=num_classes, with_bias=True)
def __call__(self, x):
x = self.encoder(x)
x = self.fc(x)
return x
def _forward_classifier(x):
module = Classifier()
return module(x)
# Transform the classifier and use it as a standard hk.Module
rng_key = jax.random.PRNGKey(42)
dummy_x = jax.random.uniform(key=rng_key, shape=(1, 224, 224, 3))
forward_classifier = hk.transform(_forward_classifier)
params = forward_classifier.init(rng=rng_key, x=dummy_x)
ret = forward_classifier.apply(params, None, dummy_x)
```
</details>
</blockquote>
</details>
<details>
<summary>Any library</summary>
<blockquote>
<details>
<summary>From PyTorch</summary>
``` python
import ivy
import kornia
import requests
import jax.numpy as jnp
from PIL import Image
# transpile kornia from torch to jax
jax_kornia = ivy.transpile(kornia, source="torch", to="jax")
# get an image
url = "http://images.cocodataset.org/train2017/000000000034.jpg"
raw_img = Image.open(requests.get(url, stream=True).raw)
# convert it to the format expected by kornia
img = jnp.transpose(jnp.array(raw_img), (2, 0, 1))
img = jnp.expand_dims(img, 0) / 255
# and use the transpiled version of any function from the library!
out = jax_kornia.enhance.sharpness(img, 5)
```
</details>
<details>
<summary>From TensorFlow</summary>
``` python
import ivy
import jax
import os
os.environ["SM_FRAMEWORK"] = "tf.keras"
import segmentation_models as sm
# transpile sm from tensorflow to jax
jax_sm = ivy.transpile(sm, source="tensorflow", to="jax")
# get some image-like arrays
key = jax.random.PRNGKey(23)
key1, key2 = jax.random.split(key)
output = jax.random.uniform(key1, (1, 3, 512, 512))
target = jax.random.uniform(key2, (1, 3, 512, 512))
# and use the transpiled version of any function from the library!
out = jax_sm.metrics.iou_score(output, target)
```
</details>
<details>
<summary>From NumPy</summary>
``` python
import ivy
import madmom
import jax.numpy as jnp
# transpile madmom from numpy to jax
jax_madmom = ivy.transpile(madmom, source="numpy", to="jax")
# get some arrays
freqs = jnp.arange(20) * 10
# and use the transpiled version of any function from the library!
out = jax_madmom.audio.filters.hz2midi(freqs)
```
</details>
</blockquote>
</details>
<details>
<summary>Any function</summary>
<blockquote>
<details>
<summary>From PyTorch</summary>
``` python
import ivy
import torch
import jax.numpy as jnp
def loss(predictions, targets):
return torch.sqrt(torch.mean((predictions - targets) ** 2))
# transpile any function from torch to jax
jax_loss = ivy.transpile(loss, source="torch", to="jax")
# get some arrays
p = jnp.array([3.0, 2.0, 1.0])
t = jnp.array([0.0, 0.0, 0.0])
# and use the transpiled version!
out = jax_loss(p, t)
```
</details>
<details>
<summary>From TensorFlow</summary>
``` python
import ivy
import tensorflow as tf
import jax.numpy as jnp
def loss(predictions, targets):
return tf.sqrt(tf.reduce_mean(tf.square(predictions - targets)))
# transpile any function from tf to jax
jax_loss = ivy.transpile(loss, source="tensorflow", to="jax")
# get some arrays
p = jnp.array([3.0, 2.0, 1.0])
t = jnp.array([0.0, 0.0, 0.0])
# and use the transpiled version!
out = jax_loss(p, t)
```
</details>
<details>
<summary>From NumPy</summary>
``` python
import ivy
import numpy as np
import jax
import jax.numpy as jnp
jax.config.update('jax_enable_x64', True)
def loss(predictions, targets):
return np.sqrt(np.mean((predictions - targets) ** 2))
# transpile any function from numpy to jax
jax_loss = ivy.transpile(loss, source="numpy", to="jax")
# get some arrays
p = jnp.array([3.0, 2.0, 1.0])
t = jnp.array([0.0, 0.0, 0.0])
# and use the transpiled version!
out = jax_loss(p, t)
```
</details>
</blockquote>
</details>
</blockquote>
</details>
<details>
<summary><b>I'm using NumPy <img class="dark-light" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/logos/supported/numpy_small_logo.png"></b></summary>
<blockquote>You can use Ivy to get NumPy code from:
<details>
<summary>Any library</summary>
<blockquote>
<details>
<summary>From PyTorch</summary>
``` python
import ivy
import kornia
import requests
import numpy as np
from PIL import Image
# transpile kornia from torch to np
np_kornia = ivy.transpile(kornia, source="torch", to="numpy")
# get an image
url = "http://images.cocodataset.org/train2017/000000000034.jpg"
raw_img = Image.open(requests.get(url, stream=True).raw)
# convert it to the format expected by kornia
img = np.transpose(np.array(raw_img), (2, 0, 1))
img = np.expand_dims(img, 0) / 255
# and use the transpiled version of any function from the library!
out = np_kornia.enhance.sharpness(img, 5)
```
</details>
<details>
<summary>From TensorFlow</summary>
``` python
import ivy
import numpy as np
import os
os.environ["SM_FRAMEWORK"] = "tf.keras"
import segmentation_models as sm
# transpile sm from tensorflow to numpy
np_sm = ivy.transpile(sm, source="tensorflow", to="numpy")
# get some image-like arrays
output = np.random.rand(1, 3, 512, 512).astype(dtype=np.float32)
target = np.random.rand(1, 3, 512, 512).astype(dtype=np.float32)
# and use the transpiled version of any function from the library!
out = np_sm.metrics.iou_score(output, target)
```
</details>
<details>
<summary>From Jax</summary>
``` python
import ivy
import rax
import numpy as np
# transpile rax from jax to numpy
np_rax = ivy.transpile(rax, source="jax", to="numpy")
# get some arrays
scores = np.array([2.2, 1.3, 5.4])
labels = np.array([1.0, 0.0, 0.0])
# and use the transpiled version of any function from the library!
out = np_rax.poly1_softmax_loss(scores, labels)
```
</details>
</blockquote>
</details>
<details>
<summary>Any function</summary>
<blockquote>
<details>
<summary>From PyTorch</summary>
``` python
import ivy
import torch
import numpy as np
def loss(predictions, targets):
return torch.sqrt(torch.mean((predictions - targets) ** 2))
# transpile any function from torch to numpy
np_loss = ivy.transpile(loss, source="torch", to="numpy")
# get some arrays
p = np.array([3.0, 2.0, 1.0])
t = np.array([0.0, 0.0, 0.0])
# and use the transpiled version!
out = np_loss(p, t)
```
</details>
<details>
<summary>From TensorFlow</summary>
``` python
import ivy
import tensorflow as tf
import numpy as np
def loss(predictions, targets):
return tf.sqrt(tf.reduce_mean(tf.square(predictions - targets)))
# transpile any function from tf to numpy
np_loss = ivy.transpile(loss, source="tensorflow", to="numpy")
# get some arrays
p = np.array([3.0, 2.0, 1.0])
t = np.array([0.0, 0.0, 0.0])
# and use the transpiled version!
out = np_loss(p, t)
```
</details>
<details>
<summary>From JAX</summary>
``` python
import ivy
import jax.numpy as jnp
import numpy as np
def loss(predictions, targets):
return jnp.sqrt(jnp.mean((predictions - targets) ** 2))
# transpile any function from jax to numpy
np_loss = ivy.transpile(loss, source="jax", to="numpy")
# get some arrays
p = np.array([3.0, 2.0, 1.0])
t = np.array([0.0, 0.0, 0.0])
# and use the transpiled version!
out = np_loss(p, t)
```
</details>
</blockquote>
</details>
</blockquote>
</details>
<details>
<summary>
<b>I'm using Ivy <img height="25px" width="25px" class="dark-light" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/logos/ivy_logo_only.svg"></b>
</summary>
Or you can use Ivy as a framework, breaking yourself (and your code)
free from deciding which community to support, allowing anyone to run
your code in their framework of choice!
``` python
import ivy
# A simple image classification model
class IvyNet(ivy.Module):
def __init__(
self,
h_w=(32, 32),
input_channels=3,
output_channels=512,
num_classes=2,
data_format="NCHW",
device="cpu",
):
self.h_w = h_w
self.input_channels = input_channels
self.output_channels = output_channels
self.num_classes = num_classes
self.data_format = data_format
self.device = device
super().__init__()
def _build(self, *args, **kwargs):
self.extractor = ivy.Sequential(
ivy.Conv2D(self.input_channels, 6, [5, 5], 1, "SAME", data_format=self.data_format),
ivy.GELU(),
ivy.Conv2D(6, 16, [5, 5], 1, "SAME", data_format=self.data_format),
ivy.GELU(),
ivy.Conv2D(16, self.output_channels, [5, 5], 1, "SAME", data_format=self.data_format),
ivy.GELU(),
)
self.classifier = ivy.Sequential(
# Since the padding is "SAME", this would be image_height x image_width x output_channels
ivy.Linear(self.h_w[0] * self.h_w[1] * self.output_channels, 512),
ivy.GELU(),
ivy.Linear(512, self.num_classes),
)
def _forward(self, x):
x = self.extractor(x)
# flatten all dims except batch dim
x = ivy.flatten(x, start_dim=1, end_dim=-1)
logits = self.classifier(x)
probs = ivy.softmax(logits)
return logits, probs
```
After building your model in Ivy, you can set your favourite framework
as the backend to use its operations under the hood!
``` python
ivy.set_backend("torch")
model = IvyNet()
x = torch.randn(1, 3, 32, 32)
logits, probs = model(x)
```
``` python
ivy.set_backend("tensorflow")
model = IvyNet()
x = tf.random.uniform(shape=(1, 3, 32, 32))
logits, probs = model(x)
```
``` python
ivy.set_backend("jax")
model = IvyNet()
x = jax.random.uniform(key, shape=(1, 3, 32, 32))
logits, probs = model(x)
```
``` python
ivy.set_backend("numpy")
model = IvyNet()
x = np.random.uniform(size=(1, 3, 32, 32))
logits, probs = model(x)
```
Last but not least, we can also build the training pipeline in pure ivy
⬇️
<details>
<summary><a>Let's define some helper functions first</a></summary>
``` python
import ivy
import numpy as np

# helper function for loading the dataset in batches
def generate_batches(images, classes, dataset_size, batch_size=32):
targets = {k: v for v, k in enumerate(np.unique(classes))}
y_train = [targets[classes[i]] for i in range(len(classes))]
if batch_size > dataset_size:
raise ivy.utils.exceptions.IvyError("Use a smaller batch size")
for idx in range(0, dataset_size, batch_size):
yield ivy.stack(images[idx : min(idx + batch_size, dataset_size)]), ivy.array(
y_train[idx : min(idx + batch_size, dataset_size)]
)
# helper function to get the number of correct predictions
def num_correct(preds, labels):
return (preds.argmax() == labels).sum().to_numpy().item()
# define a loss function
def loss_fn(params):
v, model, x, y = params
y_pred, probs = model(x)
return ivy.cross_entropy(y, probs), probs
```
</details>
<details>
<summary><a>And train this model!</a></summary>
``` python
import csv

from tqdm import tqdm

# train the model on gpu if it's available
device = "cuda:0" if ivy.gpu_is_available() else "cpu"

# training hyperparams
optimizer = ivy.Adam(1e-4)
batch_size = 64
num_epochs = 20
num_classes = 10
model = IvyNet(
h_w=(28, 28),
input_channels=1,
output_channels=120,
num_classes=num_classes,
device=device,
)
model_name = type(model).__name__.lower()
# training loop
def train(images, classes, epochs, model, device, num_classes=10, batch_size=32):
# training metrics
epoch_loss = 0.0
running_loss = 0.0
fields = ["epoch", "epoch_loss", "training_accuracy"]
metrics = []
dataset_size = len(images)
for epoch in range(epochs):
train_loss, train_correct = 0, 0
train_loop = tqdm(
generate_batches(images, classes, len(images), batch_size=batch_size),
total=dataset_size // batch_size,
position=0,
leave=True,
)
for xbatch, ybatch in train_loop:
if device != "cpu":
xbatch, ybatch = xbatch.to_device("gpu:0"), ybatch.to_device("gpu:0")
# Since the cross entropy function expects the target classes to be in one-hot encoded format
ybatch_encoded = ivy.one_hot(ybatch, num_classes)
# update model params
loss_probs, grads = ivy.execute_with_gradients(
loss_fn,
(model.v, model, xbatch, ybatch_encoded),
)
model.v = optimizer.step(model.v, grads["0"])
batch_loss = ivy.to_numpy(loss_probs[0]).mean().item() # batch mean loss
epoch_loss += batch_loss * xbatch.shape[0]
train_correct += num_correct(loss_probs[1], ybatch)
train_loop.set_description(f"Epoch [{epoch + 1:2d}/{epochs}]")
train_loop.set_postfix(
running_loss=batch_loss,
accuracy_percentage=(train_correct / dataset_size) * 100,
)
epoch_loss = epoch_loss / dataset_size
training_accuracy = train_correct / dataset_size
metrics.append([epoch, epoch_loss, training_accuracy])
train_loop.write(
f"\nAverage training loss: {epoch_loss:.6f}, Train Correct: {train_correct}",
end="\n",
)
# write metrics for plotting
with open(f"/{model_name}_train_summary.csv", "w") as f:
f = csv.writer(f)
f.writerow(fields)
f.writerows(metrics)
# assuming the dataset (images and classes) is already prepared in a folder
train(images, classes, num_epochs, model, device, num_classes=num_classes, batch_size=batch_size)
```
</details>
</details>
------------------------------------------------------------------------
# Diving deeper
Although the [Docs](https://unify.ai/docs/ivy/) are the best place to learn more, in the next section we will take a look at how Ivy works both as a transpiler and a framework in a bit more detail to get an idea of why and where to use it.
<details>
<summary><b>Ivy as a transpiler</b></summary>
Ivy's transpiler allows you to use code from any other framework (or
from any other version of the same framework!) in your own code, by just
adding one line of code. Under the hood, Ivy traces a computational
graph and leverages the frontends and backends to link one framework to
another.
This way, Ivy makes all ML-related projects available for you,
independently of the framework you want to use to research, develop, or
deploy systems. Feel free to head over to the docs for the full API
reference, but the functions you'd most likely want to use are:
``` python
# Traces an efficient fully-functional graph from a function, removing all wrapping and redundant code
ivy.trace_graph()
# Converts framework-specific code to a different framework
ivy.transpile()
# Converts framework-specific code to Ivy
ivy.unify()
```
These functions can be used eagerly or lazily. If you pass the necessary
arguments for function tracing, the graph tracing/transpilation step will
happen instantly (eagerly). Otherwise, the graph tracing/transpilation
will happen only when the returned function is first invoked.
``` python
import ivy
import jax
ivy.set_backend("jax")
# Simple JAX function to transpile
def test_fn(x):
return jax.numpy.sum(x)
x1 = ivy.array([1., 2.])
```
``` python
# Arguments are available -> transpilation happens eagerly
eager_graph = ivy.transpile(test_fn, source="jax", to="torch", args=(x1,))
# eager_graph is now torch code and runs efficiently
ret = eager_graph(x1)
```
``` python
# Arguments are not available -> transpilation happens lazily
lazy_graph = ivy.transpile(test_fn, source="jax", to="torch")
# The returned graph is initialized lazily; transpilation happens on this first call
ret = lazy_graph(x1)
# lazy_graph is now torch code and runs efficiently
ret = lazy_graph(x1)
```
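`ivy.unify` follows the same eager/lazy convention. The snippet below is a
minimal sketch of converting framework-specific code into framework-agnostic
Ivy code (the exact keyword arguments may differ slightly between versions):

``` python
import ivy
import torch

def torch_fn(x):
    return torch.mean(torch.square(x))

x = torch.tensor([1.0, 2.0, 3.0])

# Arguments are available -> unification happens eagerly
ivy_fn = ivy.unify(torch_fn, source="torch", args=(x,))

# ivy_fn should now be composed of Ivy operations, runnable with any backend
ivy.set_backend("tensorflow")
ret = ivy_fn(ivy.array([1.0, 2.0, 3.0]))
```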
If you want to learn more, you can find more information in the [Ivy as
a transpiler section of the
docs!](https://unify.ai/docs/ivy/overview/design/ivy_as_a_transpiler.html)
## When should I use Ivy as a transpiler?
If you want to use building blocks published in other frameworks (neural
networks, layers, array computing libraries, training pipelines\...),
you want to integrate code developed in various frameworks, or maybe
straight up move code from one framework to another, the transpiler is
definitely the tool 🔧 for the job! As the output of transpilation is
native code in the target framework, you can use the converted code just
as if it was code originally developed in that framework, applying
framework-specific optimizations or tools, instantly exposing your
project to all of the unique perks of a different framework.
</details>
<details>
<summary><b>Ivy as a framework</b></summary>
The Ivy framework is built on top of various essential components,
mainly the [Backend
Handler](https://unify.ai/docs/ivy/overview/design/building_blocks.html#backend-handler),
which manages what framework is being used behind the scenes and the
[Backend Functional
APIs](https://unify.ai/docs/ivy/overview/design/building_blocks.html#backend-functional-apis),
which provide framework-specific implementations of the Ivy functions.
Likewise, classes such as `ivy.Container` or `ivy.Array` are also
available, facilitating the use of structured data and array-like
objects (learn more about them
[here!](https://unify.ai/docs/ivy/overview/design/ivy_as_a_framework.html)).
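As a quick taste, here is a minimal sketch of how `ivy.Container` applies
operations across its nested leaves (the exact printed output may vary by
backend):

``` python
import ivy

ivy.set_backend("numpy")

container = ivy.Container(a=ivy.array([1.0, 2.0, 3.0]),
                          b=ivy.array([4.0, 5.0, 6.0]))

# array methods are mapped over every leaf of the container
means = container.mean()  # the leaves now hold 2.0 and 5.0 respectively
```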
All of the functionalities in Ivy are exposed through the
`Ivy functional API` and the `Ivy stateful API`. All functions in the
[Functional
API](https://unify.ai/docs/ivy/overview/design/building_blocks.html#ivy-functional-api)
are **Framework Agnostic Functions**, which means that we can use them
like this:
``` python
import ivy
import jax.numpy as jnp
import tensorflow as tf
import numpy as np
import torch
def mse_loss(y, target):
return ivy.mean((y - target)**2)
jax_mse = mse_loss(jnp.ones((5,)), jnp.ones((5,)))
tf_mse = mse_loss(tf.ones((5,)), tf.ones((5,)))
np_mse = mse_loss(np.ones((5,)), np.ones((5,)))
torch_mse = mse_loss(torch.ones((5,)), torch.ones((5,)))
```
In the example above we show how Ivy's functions are compatible with
tensors from different frameworks. This is the same for ALL Ivy
functions. They can accept tensors from any framework and return the
correct result.
The [Ivy Stateful
API](https://unify.ai/docs/ivy/overview/design/ivy_as_a_framework/ivy_stateful_api.html),
on the other hand, allows you to define trainable modules and layers,
which you can use alone or as a part of any other framework code!
``` python
import ivy
class Regressor(ivy.Module):
def __init__(self, input_dim, output_dim):
self.input_dim = input_dim
self.output_dim = output_dim
super().__init__()
def _build(self, *args, **kwargs):
self.linear0 = ivy.Linear(self.input_dim, 128)
self.linear1 = ivy.Linear(128, self.output_dim)
def _forward(self, x):
x = self.linear0(x)
x = ivy.functional.relu(x)
x = self.linear1(x)
return x
```
If we put it all together, we'll have something like this. This example
uses PyTorch as the backend, but this can easily be changed to your
favorite frameworks, such as TensorFlow, or JAX.
``` python
import ivy
class Regressor(ivy.Module):
def __init__(self, input_dim, output_dim):
self.input_dim = input_dim
self.output_dim = output_dim
super().__init__()
def _build(self, *args, **kwargs):
self.linear0 = ivy.Linear(self.input_dim, 128)
self.linear1 = ivy.Linear(128, self.output_dim)
def _forward(self, x):
x = self.linear0(x)
x = ivy.functional.relu(x)
x = self.linear1(x)
return x
ivy.set_backend('torch') # set backend to PyTorch (or any other backend!)
model = Regressor(input_dim=1, output_dim=1)
optimizer = ivy.Adam(0.3)
n_training_examples = 2000
noise = ivy.random.random_normal(shape=(n_training_examples, 1), mean=0, std=0.1)
x = ivy.linspace(-6, 3, n_training_examples).reshape((n_training_examples, 1))
y = 0.2 * x ** 2 + 0.5 * x + 0.1 + noise
def loss_fn(v, x, target):
pred = model(x, v=v)
return ivy.mean((pred - target) ** 2)
for epoch in range(40):
# forward pass
pred = model(x)
# compute loss and gradients
loss, grads = ivy.execute_with_gradients(lambda params: loss_fn(*params), (model.v, x, y))
# update parameters
model.v = optimizer.step(model.v, grads)
# print current loss
print(f'Epoch: {epoch + 1:2d} --- Loss: {ivy.to_numpy(loss).item():.5f}')
print('Finished training!')
```
The model's output can be visualized as follows:
<div align="center">
<img width="50%" class="dark-light" src="https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/regressor_lq.gif">
</div>
As always, you can find more information about [Ivy as a framework in
the
docs!](https://unify.ai/docs/ivy/overview/design/ivy_as_a_framework.html)
<h2> When should I use Ivy as a framework? </h2>
As Ivy supports multiple backends, writing code in Ivy breaks you free
from framework limitations. If you want to publish highly flexible code
for everyone to use, independently of the framework they are using, or
you plan to develop ML-related tools and want them to be interoperable
with not only the already existing frameworks, but also with future
frameworks, then Ivy is for you!
</details>
------------------------------------------------------------------------
# Contributing
We believe that everyone can contribute and make a difference. Whether
it's writing code 💻, fixing bugs 🐛, or simply sharing feedback 💬,
your contributions are definitely welcome and appreciated 🙌
Check out all of our open tasks, and find out more info in our
[Contributing
guide](https://unify.ai/docs/ivy/overview/contributing.html) in the
docs!
Join our amazing community as a code contributor, and help accelerate
our journey to unify all ML frameworks!
<a href="https://github.com/unifyai/ivy/graphs/contributors">
<img class="dark-light" src="https://contrib.rocks/image?repo=unifyai/ivy&anon=0&columns=20&max=100&r=true" />
</a>
------------------------------------------------------------------------
# Community
In order to achieve the ambitious goal of unifying AI, we definitely need
as many hands as possible on it! Whether you are a seasoned developer or
just starting out, you'll find a place here! Join the Ivy community on
our [Discord](https://discord.gg/sXyFF8tDtm) 👾 server, which is the
perfect place to ask questions, share ideas, and get help from both
fellow developers and the Ivy Team directly!
Also! Feel free to follow us on
[Twitter](https://twitter.com/letsunifyai) 🐦 as well, we use it to
share updates, sneak peeks, and all sorts of relevant news, certainly a
great way to stay in the loop 😄
Can't wait to see you there!
------------------------------------------------------------------------
# Citation
If you use Ivy for your work, please don't forget to give proper credit
by including the accompanying [paper](https://arxiv.org/abs/2102.02886)
📄 in your references. It's a small way to show appreciation and help
to continue to support this and other open source projects 🙌
@article{lenton2021ivy,
title={Ivy: Templated deep learning for inter-framework portability},
author={Lenton, Daniel and Pardo, Fabio and Falck, Fabian and James, Stephen and Clark, Ronald},
journal={arXiv preprint arXiv:2102.02886},
year={2021}
}
| ivy/README.md/0 | {
"file_path": "ivy/README.md",
"repo_id": "ivy",
"token_count": 17922
} | 1 |
Building the Docs
=================
This document describes how to build the Ivy docs. If you want to know more about how
our custom building pipeline work, check our `Building the Docs Pipeline
<../deep_dive/building_the_docs_pipeline.rst>`_ deep dive
.. warning::

    Be aware that the doc-builder was originally developed for Linux. Although,
    in theory, you can run it on any platform (supporting either docker or
    windows), it has only been tested on Linux. If you find any Windows-related
    issues, feel free to open an issue so we can review it.
.. note::

    Recommendation:
    Use the convenience script if you build the docs regularly, as it will not
    re-download the dependencies.
    If you have a slow internet connection, consider using GitHub Codespaces:
    our script downloads large dependency files, and the faster network there
    will speed up the build considerably.
Building the Docs using Docker
------------------------------
Using convenience script
~~~~~~~~~~~~~~~~~~~~~~~~
The easiest way to build the docs is to use the ``docs/make_docs.sh`` script.
.. code-block:: bash

    cd docs
    ./make_docs.sh
This script will build the docs for Ivy and store them in ``docs/build``.
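Once built, one way to preview the generated site locally (assuming a standard
Python 3 installation) is to serve the ``docs/build`` directory:

.. code-block:: bash

    python3 -m http.server --directory docs/build 8000

and then open ``http://localhost:8000`` in a browser.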
Using existing image on Docker Hub
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can also use the ``unifyai/doc-builder`` image hosted on
`Docker Hub <https://hub.docker.com/r/unifyai/doc-builder>`_ to build the
docs.
Run ``docker run`` to build the docs. The following command will build the docs for
the project in the current directory and output them to ``docs/build``.
.. code-block:: bash

    cd <ivy directory>
    docker run --rm -v $(pwd):/project unifyai/doc-builder
This command will mount the module directory to ``/project`` in the container;
the current directory should be the root of ``ivy``.
Building the image locally
~~~~~~~~~~~~~~~~~~~~~~~~~~
You can also build the image locally. You will first need to clone the ``doc-builder``
repository.
Run this command if you are using HTTPS:
.. code-block:: bash

    git clone https://github.com/unifyai/doc-builder.git
Or this command if you are using SSH:
.. code-block:: bash

    git clone git@github.com:unifyai/doc-builder.git
Then, run the following command to build the image:
.. code-block:: bash
cd doc-builder
docker build -t unifyai/doc-builder .
Building the Docs without Docker
--------------------------------
You can also build the docs without Docker. You will first need to clone the
``unifyai/doc-builder`` repository. Then use the convenience script
``make_docs_without_docker.sh``.
Run this command if you are using HTTPS:
.. code-block:: bash
git clone https://github.com/unifyai/doc-builder.git
Or this command if you are using SSH:
.. code-block:: bash
git clone [email protected]:unifyai/doc-builder.git
Then, run the following command to build the docs:
.. code-block:: bash
cd doc-builder
./make_docs_without_docker.sh <ivy directory>
The script will install the required dependencies for `sphinx <https://www.sphinx-doc.org>`_
which is used to build the docs, as well as dependencies required by Ivy. Then it will
build the docs for Ivy and store it in ``docs/build``.
| ivy/docs/overview/contributing/building_the_docs.rst/0 | {
"file_path": "ivy/docs/overview/contributing/building_the_docs.rst",
"repo_id": "ivy",
"token_count": 902
} | 2 |
Devices
=======
.. _`backend setting`: https://github.com/unifyai/ivy/blob/1eb841cdf595e2bb269fce084bd50fb79ce01a69/ivy/backend_handler.py#L204
.. _`infer_device`: https://github.com/unifyai/ivy/blob/1eb841cdf595e2bb269fce084bd50fb79ce01a69/ivy/func_wrapper.py#L286
.. _`ivy.Device`: https://github.com/unifyai/ivy/blob/0b89c7fa050db13ef52b0d2a3e1a5fb801a19fa2/ivy/__init__.py#L42
.. _`empty class`: https://github.com/unifyai/ivy/blob/0b89c7fa050db13ef52b0d2a3e1a5fb801a19fa2/ivy/__init__.py#L34
.. _`device class`: https://github.com/unifyai/ivy/blob/0b89c7fa050db13ef52b0d2a3e1a5fb801a19fa2/ivy/functional/backends/torch/__init__.py#L13
.. _`device.py`: https://github.com/unifyai/ivy/blob/08ebc4d6d5e200dcbb8498b213538ffd550767f3/ivy/functional/ivy/device.py
.. _`ivy.total_mem_on_dev`: https://github.com/unifyai/ivy/blob/08ebc4d6d5e200dcbb8498b213538ffd550767f3/ivy/functional/ivy/device.py#L460
.. _`ivy.dev_util`: https://github.com/unifyai/ivy/blob/08ebc4d6d5e200dcbb8498b213538ffd550767f3/ivy/functional/ivy/device.py#L600
.. _`ivy.num_cpu_cores`: https://github.com/unifyai/ivy/blob/08ebc4d6d5e200dcbb8498b213538ffd550767f3/ivy/functional/ivy/device.py#L659
.. _`ivy.default_device`: https://github.com/unifyai/ivy/blob/08ebc4d6d5e200dcbb8498b213538ffd550767f3/ivy/functional/ivy/device.py#L720
.. _`ivy.set_soft_device_mode`: https://github.com/unifyai/ivy/blob/afca97b95d7101c45fa647b308fc8c41f97546e3/ivy/functional/ivy/device.py#L292
.. _`@handle_device_shifting`: https://github.com/unifyai/ivy/blob/afca97b95d7101c45fa647b308fc8c41f97546e3/ivy/func_wrapper.py#L797
.. _`ivy.functional.ivy`: https://github.com/unifyai/ivy/tree/afca97b95d7101c45fa647b308fc8c41f97546e3/ivy/functional/ivy
.. _`tensorflow soft device handling function`: https://github.com/unifyai/ivy/blob/afca97b95d7101c45fa647b308fc8c41f97546e3/ivy/functional/backends/tensorflow/device.py#L102
.. _`numpy soft device handling function`: https://github.com/unifyai/ivy/blob/afca97b95d7101c45fa647b308fc8c41f97546e3/ivy/functional/backends/numpy/device.py#L88
.. _`ivy implementation`: https://github.com/unifyai/ivy/blob/afca97b95d7101c45fa647b308fc8c41f97546e3/ivy/functional/ivy/device.py#L138
.. _`tf.device`: https://www.tensorflow.org/api_docs/python/tf/device
.. _`ivy.DefaultDevice`: https://github.com/unifyai/ivy/blob/afca97b95d7101c45fa647b308fc8c41f97546e3/ivy/functional/ivy/device.py#L52
.. _`__enter__`: https://github.com/unifyai/ivy/blob/afca97b95d7101c45fa647b308fc8c41f97546e3/ivy/functional/ivy/device.py#L76
.. _`__exit__`: https://github.com/unifyai/ivy/blob/afca97b95d7101c45fa647b308fc8c41f97546e3/ivy/functional/ivy/device.py#L98
.. _`ivy.unset_soft_device_mode()`: https://github.com/unifyai/ivy/blob/2f90ce7b6a4c8ddb7227348d58363cd2a3968602/ivy/functional/ivy/device.py#L317
.. _`ivy.unset_default_device()`: https://github.com/unifyai/ivy/blob/2f90ce7b6a4c8ddb7227348d58363cd2a3968602/ivy/functional/ivy/device.py#L869
.. _`repo`: https://github.com/unifyai/ivy
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. _`devices thread`: https://discord.com/channels/799879767196958751/1189906353653817354
The devices currently supported by Ivy are as follows:
* cpu
* gpu:idx
* tpu:idx
In a similar manner to the :class:`ivy.Dtype` and :class:`ivy.NativeDtype` classes (see `Data Types <data_types.rst>`_), there is both an `ivy.Device`_ class and an :class:`ivy.NativeDevice` class, with :class:`ivy.NativeDevice` initially set as an `empty class`_.
The :class:`ivy.Device` class derives from :code:`str`, and has simple logic in the constructor to verify that the string formatting is correct.
When a backend is set, the :class:`ivy.NativeDevice` is replaced with the backend-specific `device class`_.
Device Module
-------------
The `device.py`_ module provides a variety of functions for working with devices.
A few examples include :func:`ivy.get_all_ivy_arrays_on_dev`, which gets all arrays which are currently alive on the specified device, :func:`ivy.dev`, which gets the device of the input array, and :func:`ivy.num_gpus`, which determines the number of available GPUs for use with the backend framework.
Many functions in the :mod:`device.py` module are *convenience* functions, which means that they do not directly modify arrays, as explained in the `Function Types <function_types.rst>`_ section.
For example, the following are all convenience functions: `ivy.total_mem_on_dev`_, which gets the total amount of memory for a given device, `ivy.dev_util`_, which gets the current utilization (%) for a given device, `ivy.num_cpu_cores`_, which determines the number of cores available in the CPU, and `ivy.default_device`_, which returns the correct device to use.
`ivy.default_device`_ is arguably the most important function.
Any function in the functional API that receives a :code:`device` argument will make use of this function, as explained below.
Arguments in other Functions
----------------------------
Like with :code:`dtype`, all :code:`device` arguments are also keyword-only.
All creation functions include the :code:`device` argument, for specifying the device on which to place the created array.
Some other functions outside of the :code:`creation.py` submodule also support the :code:`device` argument, such as :func:`ivy.random_uniform` which is located in :mod:`random.py`, but this is simply because of dual categorization.
:func:`ivy.random_uniform` is also essentially a creation function, despite not being located in :mod:`creation.py`.
The :code:`device` argument is generally not included for functions which accept arrays in the input and perform operations on these arrays.
In such cases, the device of the output arrays is the same as the device for the input arrays.
In cases where the input arrays are located on different devices, an error will generally be thrown, unless the function is specific to distributed training.
The :code:`device` argument is handled in `infer_device`_ for all functions which have the :code:`@infer_device` decorator, similar to how :code:`dtype` is handled.
This function calls `ivy.default_device`_ in order to determine the correct device.
As discussed in the `Function Wrapping <function_wrapping.rst>`_ section, this is applied to all applicable functions dynamically during `backend setting`_.
Overall, `ivy.default_device`_ infers the device as follows:
#. if the :code:`device` argument is provided, use this directly
#. otherwise, if an array is present in the arguments (very rare if the :code:`device` argument is present), set :code:`arr` to this array.
This will then be used to infer the device by calling :func:`ivy.dev` on the array
#. otherwise, if no arrays are present in the arguments (by far the most common case if the :code:`device` argument is present), then use the global default device, which currently can either be :code:`cpu`, :code:`gpu:idx` or :code:`tpu:idx`.
The default device is settable via :func:`ivy.set_default_device`.
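In illustrative pseudocode, the three steps above amount to something like the following sketch (this is only a sketch, not the exact implementation of `ivy.default_device`_, and the function name here is hypothetical):

.. code-block:: python

    # Hypothetical sketch of the device-inference steps described above.
    def infer_device_sketch(device=None, arr=None):
        if device is not None:
            # step 1: an explicit device argument takes priority
            return device
        if arr is not None:
            # step 2: otherwise, inherit the device of an input array
            return ivy.dev(arr)
        # step 3: otherwise, fall back to the global default device
        return ivy.default_device()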
For the majority of functions which defer to `infer_device`_ for handling the device, these steps will have been followed and the :code:`device` argument will be populated with the correct value before the backend-specific implementation is even entered into.
Therefore, whereas the :code:`device` argument is listed as optional in the ivy API at :mod:`ivy/functional/ivy/category_name.py`, the argument is listed as required in the backend-specific implementations at :mod:`ivy/functional/backends/backend_name/category_name.py`.
This is exactly the same as with the :code:`dtype` argument, as explained in the `Data Types <data_types.rst>`_ section.
Let's take a look at the function :func:`ivy.zeros` as an example.
The implementation in :mod:`ivy/functional/ivy/creation.py` has the following signature:
.. code-block:: python
@outputs_to_ivy_arrays
@handle_out_argument
@infer_dtype
@infer_device
def zeros(
shape: Union[int, Sequence[int]],
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
) -> ivy.Array:
Whereas the backend-specific implementations in :mod:`ivy/functional/backends/backend_name/creation.py` all list :code:`device` as required.
Jax:
.. code-block:: python
def zeros(
shape: Union[int, Sequence[int]],
*,
dtype: jnp.dtype,
device: jaxlib.xla_extension.Device,
) -> JaxArray:
NumPy:
.. code-block:: python
def zeros(
shape: Union[int, Sequence[int]],
*,
dtype: np.dtype,
device: str,
) -> np.ndarray:
TensorFlow:
.. code-block:: python
def zeros(
shape: Union[int, Sequence[int]],
*,
dtype: tf.DType,
device: str,
) -> Tensor:
PyTorch:
.. code-block:: python
def zeros(
shape: Union[int, Sequence[int]],
*,
dtype: torch.dtype,
device: torch.device,
) -> Tensor:
This makes it clear that these backend-specific functions are only entered into once the correct :code:`device` has been determined.
However, the :code:`device` argument for functions without the :code:`@infer_device` decorator is **not** handled by `infer_device`_, and so these defaults must be handled by the backend-specific implementations themselves, by calling :func:`ivy.default_device` internally.
Device handling
---------------
Different frameworks handle devices differently while performing an operation. For example, torch expects
all the tensors to be on the same device while performing an operation, or else, it throws a device exception. On the other hand, tensorflow
doesn't care about this, it moves all the tensors to the same device before performing an operation.
**Controlling Device Handling Behaviour**
In Ivy, users can control the device on which an operation is executed using the `ivy.set_soft_device_mode`_ flag. There are two cases for this:
either the soft device mode is set to :code:`True` or :code:`False`.
**When ivy.set_soft_device_mode(True)**:
a. All the input arrays are moved to :code:`ivy.default_device()` while performing an operation. If an array is already present
on the default device, no device shifting is done.
In the example below, even though the input arrays :code:`x` and :code:`y` are created on different devices ('cpu' and 'gpu:0'), the arrays
are moved to :code:`ivy.default_device()` while performing the :code:`ivy.add` operation, and the output array will be on this device.
.. code-block:: python
ivy.set_backend("torch")
ivy.set_soft_device_mode(True)
x = ivy.array([1], device="cpu")
y = ivy.array([34], device="gpu:0")
ivy.add(x, y)
The priority of device shifting is the following in this mode:
#. The ``device`` argument.
#. device the arrays are on.
#. :code:`default_device`
**When ivy.set_soft_device_mode(False)**:
a. If any of the input arrays are on a different device, a device exception is raised.
In the example below, since the input arrays are on different devices ('cpu' and 'gpu:0'), an :code:`IvyBackendException` is raised while performing :code:`ivy.add`.
.. code-block:: python
ivy.set_backend("torch")
ivy.set_soft_device_mode(False)
x = ivy.array([1], device="cpu")
y = ivy.array([34], device="gpu:0")
ivy.add(x, y)
This is the exception you will get while running the code above:
.. code-block:: python
IvyBackendException: torch: add: File "/content/ivy/ivy/utils/exceptions.py", line 210, in _handle_exceptions
return fn(*args, **kwargs)
File "/content/ivy/ivy/func_wrapper.py", line 1013, in _handle_nestable
return fn(*args, **kwargs)
File "/content/ivy/ivy/func_wrapper.py", line 905, in _handle_out_argument
return fn(*args, out=out, **kwargs)
File "/content/ivy/ivy/func_wrapper.py", line 441, in _inputs_to_native_arrays
return fn(*new_args, **new_kwargs)
File "/content/ivy/ivy/func_wrapper.py", line 547, in _outputs_to_ivy_arrays
ret = fn(*args, **kwargs)
File "/content/ivy/ivy/func_wrapper.py", line 358, in _handle_array_function
return fn(*args, **kwargs)
File "/content/ivy/ivy/func_wrapper.py", line 863, in _handle_device_shifting
raise ivy.utils.exceptions.IvyException(
During the handling of the above exception, another exception occurred:
Expected all input arrays to be on the same device, but found at least two devices - ('cpu', 'gpu:0'),
set `ivy.set_soft_device_mode(True)` to handle this problem.
b. If all the input arrays are on the same device, the operation is executed without raising any device exceptions.
The example below runs without issues since both the input arrays are on the 'gpu:0' device:
.. code-block:: python
ivy.set_backend("torch")
ivy.set_soft_device_mode(False)
x = ivy.array([1], device="gpu:0")
y = ivy.array([34], device="gpu:0")
ivy.add(x, y)
The code to handle all these cases is present inside the `@handle_device_shifting`_ decorator, which is wrapped around
all the functions that accept at least one array as input (except mixed and compositional functions) in the `ivy.functional.ivy`_ submodule. The decorator calls
the :code:`ivy.handle_soft_device_variable` function under the hood to handle device shifting for each backend.
The priority of device shifting is the following in this mode:
#. The ``device`` argument.
#. :code:`default_device`
**Soft Device Handling Function**
This is a function which plays a crucial role in the :code:`handle_device_shifting` decorator. The purpose of this function is to ensure that the function :code:`fn` passed to it is executed on the device passed in :code:`device_shifting_dev` argument. If it is passed as :code:`None`, then the function will be executed on the default device.
Most of the backend implementations are very similar: first they move all the arrays to the desired device using :code:`ivy.nested_map`, and then they execute the function inside the device handling context manager of that native framework. The purpose of executing the function inside the context manager is to handle functions that do not accept any arrays; in that case, the only way to let the native framework know on which device we want the function to be executed is through the context manager. This approach is used in most backend implementations, the exceptions being tensorflow, where we don't have to move all the tensors to the desired device because using its context manager is enough (it moves all the tensors internally itself), and numpy, since it only accepts `cpu` as a device.
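As a rough sketch (illustrative only, and assuming a torch version in which :code:`torch.device` can be used as a context manager, as well as a :code:`nested_map(fn, nest)` argument ordering), a torch-style soft device handler could look like this:

.. code-block:: python

    import ivy
    import torch

    # Hypothetical sketch only - the real per-backend implementations live in
    # ivy/functional/backends/<backend>/device.py.
    def handle_soft_device_variable(*args, fn, device_shifting_dev=None, **kwargs):
        device = device_shifting_dev or ivy.default_device(as_native=True)
        # first, move every array input onto the target device
        args, kwargs = ivy.nested_map(
            lambda x: ivy.to_device(x, device) if ivy.is_array(x) else x,
            [args, kwargs],
        )
        # then run the function inside the native device context, so that even
        # functions which receive no arrays execute on the desired device
        with torch.device(device):
            return fn(*args, **kwargs)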
**Forcing Operations on User Specified Device**
The `ivy.DefaultDevice`_ context manager can be used to force the operations to be performed on to a specific device. For example,
in the code below, both :code:`x` and :code:`y` will be moved from 'gpu:0' to 'cpu' device and :code:`ivy.add` operation will be performed on 'cpu' device:
.. code-block:: python
x = ivy.array([1], device="gpu:0")
y = ivy.array([34], device="gpu:0")
with ivy.DefaultDevice("cpu"):
z = ivy.add(x, y)
On entering the :code:`ivy.DefaultDevice("cpu")` context manager, under the hood, the default device is set to 'cpu' and soft device
mode is turned on. All of this happens inside the `__enter__`_ method of the
context manager. From then on, all the operations will be executed on the 'cpu' device.
On exiting the context manager (the `__exit__`_ method), the default device and the soft device mode are reset to their previous states using `ivy.unset_default_device()`_ and
`ivy.unset_soft_device_mode()`_ respectively.
There are some functions (mostly creation functions) which accept a :code:`device` argument, for specifying the device on which the function is executed and the device of the returned array. :code:`handle_device_shifting` deals with this argument by first checking if it exists and then setting :code:`device_shifting_dev` to it, which is then passed to the :code:`handle_soft_device_variable` function depending on the :code:`soft_device` mode.
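For example, the explicit :code:`device` argument of a creation function takes priority over the default device (a small usage sketch, assuming a GPU is available):

.. code-block:: python

    ivy.set_backend("torch")
    ivy.set_soft_device_mode(True)
    ivy.set_default_device("cpu")
    # the device argument takes priority over the default device, so both
    # the computation and the returned array live on 'gpu:0'
    x = ivy.zeros((2, 3), device="gpu:0")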
**Round Up**
This should have hopefully given you a good feel for devices, and how these are handled in Ivy.
If you have any questions, please feel free to reach out on `discord`_ in the `devices thread`_!
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/RZmTUwTYhKI" class="video">
</iframe>
| ivy/docs/overview/deep_dive/devices.rst/0 | {
"file_path": "ivy/docs/overview/deep_dive/devices.rst",
"repo_id": "ivy",
"token_count": 5589
} | 3 |
Operating Modes
===============
.. _`array_significant_figures`: https://github.com/unifyai/ivy/blob/59cd7b5c4e2ca2fc6fc3c3ff728c3f210d9f740c/ivy/__init__.py#L865
.. _`array_decimal_values`: https://github.com/unifyai/ivy/blob/59cd7b5c4e2ca2fc6fc3c3ff728c3f210d9f740c/ivy/__init__.py#L904
.. _`warning_level`: https://github.com/unifyai/ivy/blob/59cd7b5c4e2ca2fc6fc3c3ff728c3f210d9f740c/ivy/__init__.py#L931
.. _`nan_policy`: https://github.com/unifyai/ivy/blob/59cd7b5c4e2ca2fc6fc3c3ff728c3f210d9f740c/ivy/__init__.py#L964
.. _`dynamic_backend`: https://github.com/unifyai/ivy/blob/59cd7b5c4e2ca2fc6fc3c3ff728c3f210d9f740c/ivy/__init__.py#L998
.. _`precise_mode`: https://github.com/unifyai/ivy/blob/59cd7b5c4e2ca2fc6fc3c3ff728c3f210d9f740c/ivy/functional/ivy/general.py#L87
.. _`array_mode`: https://github.com/unifyai/ivy/blob/59cd7b5c4e2ca2fc6fc3c3ff728c3f210d9f740c/ivy/functional/ivy/general.py#L437
.. _`nestable_mode`: https://github.com/unifyai/ivy/blob/59cd7b5c4e2ca2fc6fc3c3ff728c3f210d9f740c/ivy/functional/ivy/general.py#L490
.. _`exception_trace_mode`: https://github.com/unifyai/ivy/blob/59cd7b5c4e2ca2fc6fc3c3ff728c3f210d9f740c/ivy/functional/ivy/general.py#L542
.. _`show_func_wrapper_trace_mode`: https://github.com/unifyai/ivy/blob/59cd7b5c4e2ca2fc6fc3c3ff728c3f210d9f740c/ivy/functional/ivy/general.py#L597
.. _`min_denominator`: https://github.com/unifyai/ivy/blob/59cd7b5c4e2ca2fc6fc3c3ff728c3f210d9f740c/ivy/functional/ivy/general.py#L2119
.. _`min_base`: https://github.com/unifyai/ivy/blob/59cd7b5c4e2ca2fc6fc3c3ff728c3f210d9f740c/ivy/functional/ivy/general.py#L2174
.. _`queue_timeout`: https://github.com/unifyai/ivy/blob/59cd7b5c4e2ca2fc6fc3c3ff728c3f210d9f740c/ivy/functional/ivy/general.py#L2444
.. _`tmp_dir`: https://github.com/unifyai/ivy/blob/59cd7b5c4e2ca2fc6fc3c3ff728c3f210d9f740c/ivy/functional/ivy/general.py#L2502
.. _`shape_array_mode`: https://github.com/unifyai/ivy/blob/59cd7b5c4e2ca2fc6fc3c3ff728c3f210d9f740c/ivy/functional/ivy/general.py#L3418
Global Parameter Properties
---------------------------
There are a variety of global settings in ivy, each of which comes with: ``ivy.<setting>`` (getter), ``ivy.set_<setting>`` (setter), and ``ivy.unset_<setting>`` (unsetter).
Some of them are:
#. `array_significant_figures`_: Determines the number of significant figures to be shown when printing.
#. `array_decimal_values`_: Determines the number of decimal values to be shown when printing.
#. `warning_level`_: Determines the warning level to be shown when one occurs.
#. `nan_policy`_: Determines the policy of handling related to ``nan``.
#. `dynamic_backend`_: Determines if the global dynamic backend setting is active or not.
#. `precise_mode`_: Determines whether to use a promotion table that avoids any precision loss or a compute efficient table that avoids most wider-than-necessary promotions.
#. `array_mode`_: Determines whether to convert inputs to ``ivy.NativeArray`` and then convert the outputs back to ``ivy.Array``.
#. `nestable_mode`_: Determines whether to check if function inputs are ``ivy.Container``.
#. `exception_trace_mode`_: Determines how much detail of the ivy exception traces is shown in the log.
#. `show_func_wrapper_trace_mode`_: Determines whether to show ``func_wrapper`` related traces in the log.
#. `min_denominator`_: Determines the global minimum denominator used by ivy for numerically stable division.
#. `min_base`_: Determines the global minimum base used by ivy for numerically stable power raising.
#. `queue_timeout`_: Determines the timeout value (in seconds) for the global queue.
#. `tmp_dir`_: Determines the name for the temporary folder if it is used.
#. `shape_array_mode`_: Determines whether to return shape as ``ivy.Array``.
Let's look at the getters and setters in more detail below!
Getter: ``ivy.<setting>`` attribute
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``ivy.<setting>`` is a read-only static attribute. It acts as a getter and it will change internally whenever its related setter is used.
Should a user attempt to set the attribute directly, an error will be raised, suggesting that they change its value through the respective setter or unsetter.
.. code-block:: python
>>> ivy.array_mode
True
>>> ivy.array_mode = False
File "<stdin>", line 1, in <module>
File ".../ivy/ivy/__init__.py", line 1306, in __setattr__
raise ivy.utils.exceptions.IvyException(
IvyException: Property: array_mode is read only! Please use the setter: set_array_mode() for setting its value!
Setter: ``ivy.set_<setting>`` and ``ivy.unset_<setting>`` functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In order to change the value of a property, setter functions must be used.
.. code-block:: python
>>> ivy.array_mode
True
>>> ivy.set_array_mode(False)
>>> ivy.array_mode
False
>>> ivy.unset_array_mode()
>>> ivy.array_mode
True
| ivy/docs/overview/deep_dive/operating_modes.rst/0 | {
"file_path": "ivy/docs/overview/deep_dive/operating_modes.rst",
"repo_id": "ivy",
"token_count": 1964
} | 4 |
Why Unify?
==========
“What is the point of unifying all ML frameworks?” you may ask.
You may be perfectly happy with the framework you currently use, and that’s great! We live in a time where great ML tools are in abundance, and that’s a wonderful thing!
Ivy just makes a wonderful thing **even better**…
We’ll give two clear examples of how Ivy can streamline your ML workflow and save you **weeks** of development time.
No More Re-implementations 🚧
-----------------------------
Let’s say `DeepMind <https://deepmind.com>`_ release an awesome paper in JAX, and you’d love to try it out using your own framework of choice.
Let’s use `PerceiverIO <https://deepmind.com/research/open-source/perceiver-IO>`_ as an example.
What happens currently is:
#. A slew of open-source developers rush to re-implement the code in all ML frameworks, leading to many different versions (`a <https://github.com/lucidrains/perceiver-pytorch>`_, `b <https://github.com/krasserm/perceiver-io>`_, `c <https://github.com/Rishit-dagli/Perceiver>`_, `d <https://github.com/esceptico/perceiver-io>`_, `e <https://github.com/huggingface/transformers/tree/v4.16.1/src/transformers/models/perceiver>`_, `f <https://github.com/keras-team/keras-io/blob/master/examples/vision/perceiver_image_classification.py>`_, `g <https://github.com/deepmind/deepmind-research/tree/21084c8489c34defe7d4e20be89715bba914945c/perceiver>`_).
#. These implementations all inevitably deviate from the original, often leading to erroneous training, poor convergence, performance issues, etc.
Entirely new papers can even be published for having managed to `get things working in a new framework <https://link.springer.com/chapter/10.1007/978-3-030-01424-7_10>`_.
#. These repositories become full of issues, pull requests, and confusion over why things do or don’t work exactly as expected in the original paper and codebase (`a <https://github.com/lucidrains/perceiver-pytorch/issues>`_, `b <https://github.com/krasserm/perceiver-io/issues>`_, `c <https://github.com/Rishit-dagli/Perceiver/issues>`_, `d <https://github.com/esceptico/perceiver-io/issues>`_, `e <https://github.com/huggingface/transformers/issues>`_, `f <https://github.com/keras-team/keras-io/issues>`_, `g <https://github.com/deepmind/deepmind-research/issues>`_).
#. In total, 100s of hours are spent on: developing each spin-off codebase, testing the code, discussing the errors, and iterating to try and address them.
This is all for the sake of re-implementing a single project in multiple frameworks.
With Ivy, this process becomes:
#. With one line, convert the code directly to your framework with a computation graph guaranteed to be identical to the original.
We have turned a 4-step process which can take 100s of hours into a 1-step process which takes a few seconds.
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/background/why_unify/perceiver_effort.png?raw=true
:align: center
:width: 100%
Taking things further, we can use this automatic conversion tool to open up **all** ML tools to **everyone** regardless of their framework.
“Infinite” Shelf-Life ✅
------------------------
Wouldn’t it be nice if we could write some code once and know that it won’t become quickly obsolete among the frantic rush of framework development?
A lot of developers have spent a lot of time porting TensorFlow code to PyTorch in the last few years, with examples being `Lucid <https://github.com/greentfrapp/lucent>`_, `Honk <https://github.com/castorini/honk>`_ and `Improving Language Understanding <https://github.com/huggingface/pytorch-openai-transformer-lm>`_.
The pattern hasn’t changed: developers are now spending many hours porting code to JAX.
For example: `TorchVision <https://github.com/rolandgvc/flaxvision>`_, `TensorFlow Graph Nets library <https://github.com/deepmind/jraph>`_, `TensorFlow Probability <https://github.com/deepmind/distrax>`_, `TensorFlow Sonnet <https://github.com/deepmind/dm-haiku>`_.
What about the next framework that gets released a few years from now? Must we continue re-implementing everything over and over again?
With Ivy, you can write your code **once**, and then it will support all future ML frameworks with **zero** changes needed.
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/background/why_unify/future_proof.png?raw=true
:align: center
:width: 80%
The same can be said about high-level code for Modules, Optimizers, Trainers, etc.
Currently, the status quo is to continue implementing new high-level libraries for each new framework, with examples being: (a) `Sonnet <https://github.com/deepmind/sonnet>`_, `Keras <https://github.com/keras-team/keras>`_ and `Dopamine <https://github.com/google/dopamine>`_ for TensorFlow (b) `Ignite <https://github.com/pytorch/ignite>`_, `Catalyst <https://github.com/catalyst-team/catalyst>`_, `Lightning <https://github.com/PyTorchLightning/pytorch-lightning>`_, and `FastAI <https://github.com/fastai/fastai>`_ for PyTorch, and (c) `Haiku <https://github.com/deepmind/dm-haiku>`_, `Flax <https://github.com/google/flax>`_, `Trax <https://github.com/google/trax>`_ and `Objax <https://github.com/google/objax>`_ for JAX.
With Ivy, we have implemented Modules, Optimizers, and Trainers **once** with simultaneous support for all **current** and **future** frameworks.
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/background/why_unify/reinvented_wheels.png?raw=true
:align: center
:width: 100%
**Round Up**
Hopefully, this has given you some idea of the many benefits that a fully unified ML framework could offer 🙂
Please reach out on `discord <https://discord.gg/sXyFF8tDtm>`_ if you have any questions!
| ivy/docs/overview/motivation/why_unify.rst/0 | {
"file_path": "ivy/docs/overview/motivation/why_unify.rst",
"repo_id": "ivy",
"token_count": 1838
} | 5 |
.. _`RWorks Wrapper Frameworks`:
Wrapper Frameworks
==================
.. _`EagerPy`: https://eagerpy.jonasrauber.de/
.. _`PyTorch`: https://pytorch.org/
.. _`TensorFlow`: https://www.tensorflow.org/
.. _`JAX`: https://jax.readthedocs.io/
.. _`NumPy`: https://numpy.org/
.. _`Keras`: https://keras.io/
.. _`Microsoft Cognitive Toolkit`: https://learn.microsoft.com/en-us/cognitive-toolkit/
.. _`Theano`: https://github.com/Theano/Theano
.. _`PlaidML`: https://github.com/plaidml/plaidml
.. _`Thinc`: https://thinc.ai/
.. _`MXNet`: https://mxnet.apache.org/
.. _`TensorLy`: http://tensorly.org/
.. _`NeuroPod`: https://neuropod.ai/
.. _`CuPy`: https://cupy.dev/
.. _`SciPy`: https://scipy.org/
.. _`TorchScript`: https://pytorch.org/docs/stable/jit.html
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. |eagerpy| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/wrapper_frameworks/eagerpy.png
:height: 15pt
:class: dark-light
.. |keras| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/wrapper_frameworks/keras.png
:height: 20pt
:class: dark-light
.. |thinc| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/wrapper_frameworks/thinc.png
:height: 15pt
.. |tensorly| image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/related_work/wrapper_frameworks/tensorly.png
:height: 20pt
There are a variety of wrapper frameworks which wrap around other ML frameworks, enabling these ML frameworks to be switched in and out very easily in the backend, and enabling framework-agnostic code to be written, both for deployment and for training.
These wrapper frameworks can be considered as “higher level” than the individual ML frameworks that they wrap, given that they abstract these ML frameworks into the backend, and they typically do not go any lower level than this, often being pure Python projects, delegating all lower level compiler code handling to the frameworks being wrapped.
EagerPy |eagerpy|
-----------------
`EagerPy`_ lets users write code that automatically works natively with `PyTorch`_, `TensorFlow`_, `JAX`_, and `NumPy`_.
Key differences to Ivy are the lack of transpiler support and the lack of a stateful API for constructing high level classes such as network layers, optimizers, initializers, and trainers in the framework.
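For illustration, EagerPy code is typically written against :code:`eagerpy` tensors which wrap the native ones (a minimal sketch, assuming the standard :code:`astensor`/:code:`raw` API):

.. code-block:: python

    import eagerpy as ep

    def norm(x):
        # works natively with PyTorch, TensorFlow, JAX and NumPy tensors
        x = ep.astensor(x)
        result = x.square().sum().sqrt()
        # unwrap back to the original native tensor type
        return result.raw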
Keras |keras|
-------------
`Keras`_ includes high level classes for building network layers, optimizers, initializers, and trainers, and also a lower level functional API.
Up until version 2.3, Keras supported multiple backends, including `TensorFlow`_, `Microsoft Cognitive Toolkit`_, `Theano`_, and `PlaidML`_, but as of version 2.4, only TensorFlow is supported.
Thinc |thinc|
-------------
`Thinc`_ is a lightweight library that offers a functional-programming API for composing models, with support for layers defined in `PyTorch`_, `TensorFlow`_ or `MXNet`_.
Thinc can be used as an interface layer, a standalone toolkit, or a way to develop new models.
The focus is very much on high level training workflows, and unlike `EagerPy`_ and `Keras`_, the framework does not implement an extensive functional API at the array processing level.
For example, common functions such as :func:`linspace`, :func:`arange`, :func:`scatter`, :func:`gather`, :func:`split`, :func:`unstack`, and many more are not present in the framework.
Thinc instead focuses on tools to compose neural networks based on the most common building blocks, with high level APIs for: Models, Layers, Optimizers, Initializers, Schedules, and Losses.
TensorLy |tensorly|
-------------------
`TensorLy`_ provides utilities to use a variety of tensor methods, from core tensor operations and tensor algebra to tensor decomposition and regression.
It supports `PyTorch`_, `NumPy`_, `CuPy`_, `JAX`_, `TensorFlow`_, `MXNet`_ and `SciPy`_ in the backend.
The API is fully functional and strongly focused on high dimensional tensor methods, such as :code:`partial_SVD`, :code:`kron` and :code:`tucker_mode_dot`, and it does not include a stateful API for constructing high level classes such as network layers, optimizers, initializers and trainers.
There is also no support for some simpler and more common array processing functions such as :func:`scatter`, :func:`gather`, :func:`minimum`, :func:`maximum`, :func:`logical_or`, :func:`logical_and`, and many others.
NeuroPod
--------
`Neuropod`_ is a library that provides a uniform interface to run deep learning models from multiple frameworks in C++ and Python.
Neuropod makes it easy for researchers to build models in a framework of their choice while also simplifying the deployment of these models.
It currently supports `TensorFlow`_, `PyTorch`_, `TorchScript`_, and `Keras`_.
Compared to other wrapper frameworks, NeuroPod is very high level.
It wraps entire models which have already been trained, in a manner where the interface to these models is unified.
It excels in a setting where multiple networks, which may have been trained in a variety of frameworks, must all act as subsystems performing specific tasks as part of a larger complex system, and the network interfaces in this larger system should be unified.
This abstraction enables subsystem networks to be quickly replaced by other networks performing the same role, irrespective of which framework the subsystem is running under the hood.
| ivy/docs/overview/related_work/wrapper_frameworks.rst/0 | {
"file_path": "ivy/docs/overview/related_work/wrapper_frameworks.rst",
"repo_id": "ivy",
"token_count": 1609
} | 6 |
# global
import abc
from typing import Tuple, Optional, List, Union
# local
import ivy
Finfo = None
Iinfo = None
class _ArrayWithDataTypes(abc.ABC):
def astype(
self: ivy.Array,
dtype: ivy.Dtype,
/,
*,
copy: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Copy an array to a specified data type irrespective of :ref:`type-
promotion` rules.
.. note::
Casting floating-point ``NaN`` and ``infinity`` values to integral data types
is not specified and is implementation-dependent.
.. note::
When casting a boolean input array to a numeric data type, a value of ``True``
must cast to a numeric value equal to ``1``, and a value of ``False`` must cast
to a numeric value equal to ``0``.
When casting a numeric input array to ``bool``, a value of ``0`` must cast to
``False``, and a non-zero value must cast to ``True``.
Parameters
----------
self
array to cast.
dtype
desired data type.
copy
specifies whether to copy an array when the specified ``dtype`` matches
the data type of the input array ``x``. If ``True``, a newly allocated
array must always be returned. If ``False`` and the specified ``dtype``
matches the data type of the input array, the input array must be returned;
otherwise, a newly allocated must be returned. Default: ``True``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
an array having the specified data type. The returned array must have
the same shape as ``x``.
Examples
--------
Using :class:`ivy.Array` instance method:
>>> x = ivy.array([[-1, -2], [0, 2]])
>>> print(x.astype(ivy.float64))
ivy.array([[-1., -2.], [0., 2.]])
"""
return ivy.astype(self._data, dtype, copy=copy, out=out)
def broadcast_arrays(
self: ivy.Array, *arrays: Union[ivy.Array, ivy.NativeArray]
) -> List[ivy.Array]:
"""`ivy.Array` instance method variant of `ivy.broadcast_arrays`. This
method simply wraps the function, and so the docstring for
`ivy.broadcast_arrays` also applies to this method with minimal
changes.
Parameters
----------
self
An input array to be broadcasted against other input arrays.
arrays
an arbitrary number of arrays to-be broadcasted.
Each array must have the same shape.
Each array must have the same dtype as its
corresponding input array.
Returns
-------
ret
A list containing broadcasted arrays of type `ivy.Array`
Examples
--------
With :class:`ivy.Array` inputs:
>>> x1 = ivy.array([1, 2])
>>> x2 = ivy.array([0.2, 0.])
>>> x3 = ivy.zeros(2)
>>> y = x1.broadcast_arrays(x2, x3)
>>> print(y)
[ivy.array([1, 2]), ivy.array([0.2, 0. ]), ivy.array([0., 0.])]
With mixed :class:`ivy.Array` and :class:`ivy.NativeArray` inputs:
>>> x1 = ivy.array([-1., 3.4])
>>> x2 = ivy.native_array([2.4, 5.1])
>>> y = x1.broadcast_arrays(x2)
>>> print(y)
[ivy.array([-1., 3.4]), ivy.array([2.4, 5.1])]
"""
return ivy.broadcast_arrays(self._data, *arrays)
def broadcast_to(
self: ivy.Array, /, shape: Tuple[int, ...], *, out: Optional[ivy.Array] = None
) -> ivy.Array:
"""`ivy.Array` instance method variant of `ivy.broadcast_to`. This
method simply wraps the function, and so the docstring for
`ivy.broadcast_to` also applies to this method with minimal changes.
Parameters
----------
self
input array to be broadcasted.
shape
desired shape to be broadcasted to.
out
Optional array to store the broadcasted array.
Returns
-------
ret
Returns the broadcasted array of shape 'shape'
Examples
--------
With :class:`ivy.Array` instance method:
>>> x = ivy.array([1, 2, 3])
>>> y = x.broadcast_to((3,3))
>>> print(y)
ivy.array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
"""
return ivy.broadcast_to(self._data, shape=shape, out=out)
def can_cast(self: ivy.Array, to: ivy.Dtype) -> bool:
"""`ivy.Array` instance method variant of `ivy.can_cast`. This method
simply wraps the function, and so the docstring for `ivy.can_cast` also
applies to this method with minimal changes.
Parameters
----------
self
input array from which to cast.
to
desired data type.
Returns
-------
ret
``True`` if the cast can occur according to :ref:`type-promotion` rules;
otherwise, ``False``.
Examples
--------
>>> x = ivy.array([1., 2., 3.])
>>> print(x.dtype)
float32
>>> x = ivy.array([4., 5., 6.])
>>> print(x.can_cast(ivy.float64))
True
"""
return ivy.can_cast(self._data, to)
def dtype(
self: ivy.Array, as_native: bool = False
) -> Union[ivy.Dtype, ivy.NativeDtype]:
"""`ivy.Array` instance method variant of `ivy.dtype`. This method
helps to get the data type of the array.
Parameters
----------
self
The input array.
as_native
Whether to return the native data type of the array.
If True, returns the native data type. Default is False.
Returns
-------
ret
The data type of the array. If as_native is True,
returns the native data type.
Examples
--------
>>> x = ivy.array([1, 2, 3])
>>> y = x.dtype()
>>> print(y)
int32
        >>> x = ivy.array([1.0, 2.0, 3.0], dtype=ivy.float64)
>>> y = x.dtype(as_native=True)
>>> print(y)
float64
"""
return ivy.dtype(self._data, as_native=as_native)
def finfo(self: ivy.Array, /) -> Finfo:
"""Array instance method variant of `ivy.finfo`.
Parameters
----------
self
input array.
Returns
-------
ret
An instance of the `Finfo` class, containing information
about the floating point data type of the input array.
Example
-------
>>> x = ivy.array([0.7,8.4,3.14], dtype=ivy.float32)
>>> print(x.finfo())
finfo(resolution=1e-06, min=-3.4028235e+38, max=3.4028235e+38, dtype=float32)
"""
return ivy.finfo(self._data)
def iinfo(self: ivy.Array, /) -> Iinfo:
"""`ivy.Array` instance method variant of `ivy.iinfo`. This method
simply wraps the function, and so the docstring for `ivy.iinfo` also
applies to this method with minimal changes.
Parameters
----------
self
input array.
Returns
-------
ret
An instance of the `Iinfo` class, containing information
about the integer data type of the input array.
Examples
--------
        >>> x = ivy.array([-119,122,14], dtype=ivy.int8)
>>> x.iinfo()
iinfo(min=-128, max=127, dtype=int8)
        >>> x = ivy.array([-12,54,1,9,-1220], dtype=ivy.int16)
>>> x.iinfo()
iinfo(min=-32768, max=32767, dtype=int16)
"""
return ivy.iinfo(self._data)
def is_bool_dtype(self: ivy.Array) -> bool:
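        """`ivy.Array` instance method variant of `ivy.is_bool_dtype`. This
        method simply wraps the function, returning ``True`` if the array's
        data type is a boolean data type and ``False`` otherwise."""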
return ivy.is_bool_dtype(self._data)
def is_float_dtype(self: ivy.Array) -> bool:
"""`ivy.Array` instance method variant of `ivy.is_float_dtype`. This
method simply checks to see if the array is of type `float`.
Parameters
----------
self
Input array from which to check for float dtype.
Returns
-------
ret
Boolean value of whether the array is of type `float`.
Examples
--------
>>> x = ivy.array([1, 2, 3], dtype=ivy.int8)
>>> print(x.is_float_dtype())
False
>>> x = ivy.array([2.3, 4.5, 6.8], dtype=ivy.float32)
        >>> print(x.is_float_dtype())
True
"""
return ivy.is_float_dtype(self._data)
def is_int_dtype(self: ivy.Array) -> bool:
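        """`ivy.Array` instance method variant of `ivy.is_int_dtype`. This
        method simply wraps the function, returning ``True`` if the array's
        data type is an integer data type and ``False`` otherwise."""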
return ivy.is_int_dtype(self._data)
def is_uint_dtype(self: ivy.Array) -> bool:
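        """`ivy.Array` instance method variant of `ivy.is_uint_dtype`. This
        method simply wraps the function, returning ``True`` if the array's
        data type is an unsigned integer data type and ``False`` otherwise."""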
return ivy.is_uint_dtype(self._data)
def result_type(
self: ivy.Array,
*arrays_and_dtypes: Union[ivy.Array, ivy.NativeArray, ivy.Dtype],
) -> ivy.Dtype:
"""`ivy.Array` instance method variant of `ivy.result_type`. This
method simply wraps the function, and so the docstring for
`ivy.result_type` also applies to this method with minimal changes.
Parameters
----------
self
input array from which to cast.
arrays_and_dtypes
an arbitrary number of input arrays and/or dtypes.
Returns
-------
ret
the dtype resulting from an operation involving the input arrays and dtypes.
Examples
--------
>>> x = ivy.array([0, 1, 2])
>>> print(x.dtype)
int32
>>> x.result_type(ivy.float64)
<dtype:'float64'>
"""
return ivy.result_type(self._data, *arrays_and_dtypes)
| ivy/ivy/data_classes/array/data_type.py/0 | {
"file_path": "ivy/ivy/data_classes/array/data_type.py",
"repo_id": "ivy",
"token_count": 4581
} | 7 |
# global
import abc
from typing import (
Optional,
Union,
Sequence,
Tuple,
List,
Iterable,
Callable,
Literal,
Any,
)
from numbers import Number
# local
import ivy
from ivy import handle_view
class _ArrayWithManipulationExperimental(abc.ABC):
@handle_view
def moveaxis(
self: ivy.Array,
source: Union[int, Sequence[int]],
destination: Union[int, Sequence[int]],
/,
*,
copy: Optional[bool] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.moveaxis. This method
        simply wraps the function, and so the docstring for ivy.moveaxis also
applies to this method with minimal changes.
Parameters
----------
a
The array whose axes should be reordered.
source
Original positions of the axes to move. These must be unique.
destination
Destination positions for each of the original axes.
These must also be unique.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a view
of the input array.
out
optional output array, for writing the result to.
Returns
-------
ret
Array with moved axes. This array is a view of the input array.
Examples
--------
>>> x = ivy.zeros((3, 4, 5))
>>> x.moveaxis(0, -1).shape
(4, 5, 3)
>>> x.moveaxis(-1, 0).shape
(5, 3, 4)
"""
return ivy.moveaxis(self._data, source, destination, copy=copy, out=out)
def heaviside(
self: ivy.Array,
x2: ivy.Array,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.heaviside. This method
simply wraps the function, and so the docstring for ivy.heaviside also
applies to this method with minimal changes.
Parameters
----------
self
input array.
x2
values to use where x1 is zero.
out
optional output array, for writing the result to.
Returns
-------
ret
output array with element-wise Heaviside step function of x1.
This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> x1 = ivy.array([-1.5, 0, 2.0])
>>> x2 = ivy.array([0.5])
>>> ivy.heaviside(x1, x2)
ivy.array([0.0000, 0.5000, 1.0000])
>>> x1 = ivy.array([-1.5, 0, 2.0])
>>> x2 = ivy.array([1.2, -2.0, 3.5])
>>> ivy.heaviside(x1, x2)
ivy.array([0., -2., 1.])
"""
return ivy.heaviside(self._data, x2, out=out)
@handle_view
def flipud(
self: ivy.Array,
/,
*,
copy: Optional[bool] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.flipud. This method simply
wraps the function, and so the docstring for ivy.flipud also applies to
this method with minimal changes.
Parameters
----------
self
The array to be flipped.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning
a view of the input array.
out
optional output array, for writing the result to.
Returns
-------
ret
Array corresponding to input array with elements
order reversed along axis 0.
Examples
--------
>>> m = ivy.diag([1, 2, 3])
>>> m.flipud()
ivy.array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
"""
return ivy.flipud(self._data, copy=copy, out=out)
def vstack(
self: ivy.Array,
arrays: Union[
Tuple[Union[ivy.Array, ivy.NativeArray]],
List[Union[ivy.Array, ivy.NativeArray]],
],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.vstack. This method simply
wraps the function, and so the docstring for ivy.vstack also applies to
this method with minimal changes.
Examples
--------
>>> x = ivy.array([[1, 2]])
>>> y = [ivy.array([[5, 6]]), ivy.array([[7, 8]])]
>>> print(x.vstack(y))
ivy.array([[1, 2],
[5, 6],
[7, 8]])
"""
if not isinstance(arrays, (list, tuple)):
arrays = [arrays]
if isinstance(arrays, tuple):
            x = (self._data,) + arrays
else:
x = [self._data] + arrays
return ivy.vstack(x, out=out)
def hstack(
self: ivy.Array,
arrays: Union[
Tuple[Union[ivy.Array, ivy.NativeArray]],
List[Union[ivy.Array, ivy.NativeArray]],
],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.hstack. This method simply
wraps the function, and so the docstring for ivy.hstack also applies to
this method with minimal changes.
Examples
--------
>>> x = ivy.array([[1, 2]])
>>> y = [ivy.array([[5, 6]]), ivy.array([[7, 8]])]
        >>> print(x.hstack(y))
        ivy.array([[1, 2, 5, 6, 7, 8]])
"""
if not isinstance(arrays, (list, tuple)):
arrays = [arrays]
if isinstance(arrays, tuple):
x = (self._data,) + arrays
else:
x = [self._data] + arrays
return ivy.hstack(x, out=out)
@handle_view
def rot90(
self: ivy.Array,
/,
*,
copy: Optional[bool] = None,
k: int = 1,
axes: Tuple[int, int] = (0, 1),
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.rot90. This method simply
wraps the function, and so the docstring for ivy.rot90 also applies to
this method with minimal changes.
Parameters
----------
self
Input array of two or more dimensions.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning
a view of the input array.
k
Number of times the array is rotated by 90 degrees.
axes
The array is rotated in the plane defined by the axes. Axes must be
different.
out
Optional output, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
Array with a rotated view of input array.
Examples
--------
>>> m = ivy.array([[1,2], [3,4]])
>>> m.rot90()
ivy.array([[2, 4],
[1, 3]])
>>> m = ivy.array([[1,2], [3,4]])
>>> m.rot90(k=2)
ivy.array([[4, 3],
[2, 1]])
>>> m = ivy.array([[[0, 1],\
[2, 3]],\
[[4, 5],\
[6, 7]]])
>>> m.rot90(k=2, axes=(1,2))
ivy.array([[[3, 2],
[1, 0]],
[[7, 6],
[5, 4]]])
"""
return ivy.rot90(self._data, copy=copy, k=k, axes=axes, out=out)
def top_k(
self: ivy.Array,
k: int,
/,
*,
axis: int = -1,
largest: bool = True,
sorted: bool = True,
out: Optional[tuple] = None,
) -> Tuple[ivy.Array, ivy.NativeArray]:
"""ivy.Array instance method variant of ivy.top_k. This method simply
wraps the function, and so the docstring for ivy.top_k also applies to
this method with minimal changes.
Parameters
----------
self
The array to compute top_k for.
k
            Number of top elements to return; must not exceed the array size.
        axis
            The axis along which we must return the top elements. Default value is -1.
        largest
            If largest is set to False, we return the k smallest elements of the array.
        sorted
            If sorted is set to True, we return the elements in sorted order.
        out
            Optional output tuple, for writing the result to. Must have two arrays,
            with a shape that the returned tuple broadcasts to.
Returns
-------
ret
A named tuple with values and indices of top k elements.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([2., 1., -3., 5., 9., 0., -4])
>>> y = x.top_k(2)
>>> print(y)
top_k(values=ivy.array([9., 5.]), indices=ivy.array([4, 3]))
"""
return ivy.top_k(self, k, axis=axis, largest=largest, sorted=sorted, out=out)
@handle_view
def fliplr(
self: ivy.Array,
/,
*,
copy: Optional[bool] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.fliplr. This method simply
wraps the function, and so the docstring for ivy.fliplr also applies to
this method with minimal changes.
Parameters
----------
self
The array to be flipped. Must be at least 2-D.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a
view of the input array.
out
optional output array, for writing the result to.
Returns
-------
ret
Array corresponding to input array with elements
order reversed along axis 1.
Examples
--------
>>> m = ivy.diag([1, 2, 3])
>>> m.fliplr()
ivy.array([[0, 0, 1],
[0, 2, 0],
[3, 0, 0]])
"""
return ivy.fliplr(self._data, copy=copy, out=out)
def i0(
self: ivy.Array,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.i0. This method simply
wraps the function, and so the docstring for ivy.i0 also applies to
this method with minimal changes.
Parameters
----------
self
Input array.
out
Optional output, for writing the result to.
Returns
-------
ret
Array with modified Bessel function of the first kind, order 0.
Examples
--------
>>> x = ivy.array([[1, 2, 3]])
>>> x.i0()
ivy.array([1.26606588, 2.2795853 , 4.88079259])
"""
return ivy.i0(self._data, out=out)
@handle_view
def flatten(
self: ivy.Array,
*,
copy: Optional[bool] = None,
start_dim: int = 0,
end_dim: int = -1,
order: str = "C",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.flatten. This method simply
wraps the function, and so the docstring for ivy.flatten also applies
to this method with minimal changes.
Parameters
----------
self
input array to flatten.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning a
view of the input array.
start_dim
first dim to flatten. If not set, defaults to 0.
end_dim
last dim to flatten. If not set, defaults to -1.
order
Read the elements of the input container using this index order,
and place the elements into the reshaped array using this index order.
‘C’ means to read / write the elements using C-like index order,
with the last axis index changing fastest, back to the first axis index
changing slowest.
‘F’ means to read / write the elements using Fortran-like index order, with
the first index changing fastest, and the last index changing slowest.
Note that the ‘C’ and ‘F’ options take no account of the memory layout
of the underlying array, and only refer to the order of indexing.
Default order is 'C'.
out
Optional output, for writing the result to.
Returns
-------
ret
the flattened array over the specified dimensions.
Examples
--------
>>> x = ivy.array([[1,2], [3,4]])
>>> x.flatten()
ivy.array([1, 2, 3, 4])
>>> x = ivy.array([[1,2], [3,4]])
>>> x.flatten(order='F')
ivy.array([1, 3, 2, 4])
>>> x = ivy.array(
[[[[ 5, 5, 0, 6],
[17, 15, 11, 16],
[ 6, 3, 13, 12]],
[[ 6, 18, 10, 4],
[ 5, 1, 17, 3],
[14, 14, 18, 6]]],
[[[12, 0, 1, 13],
[ 8, 7, 0, 3],
[19, 12, 6, 17]],
[[ 4, 15, 6, 15],
[ 0, 5, 17, 9],
[ 9, 3, 6, 19]]],
[[[17, 13, 11, 16],
[ 4, 18, 17, 4],
[10, 10, 9, 1]],
[[19, 17, 13, 10],
[ 4, 19, 16, 17],
[ 2, 12, 8, 14]]]]
)
>>> x.flatten(start_dim = 1, end_dim = 2)
ivy.array(
[[[ 5, 5, 0, 6],
[17, 15, 11, 16],
[ 6, 3, 13, 12],
[ 6, 18, 10, 4],
[ 5, 1, 17, 3],
[14, 14, 18, 6]],
[[12, 0, 1, 13],
[ 8, 7, 0, 3],
[19, 12, 6, 17],
[ 4, 15, 6, 15],
[ 0, 5, 17, 9],
[ 9, 3, 6, 19]],
[[17, 13, 11, 16],
[ 4, 18, 17, 4],
[10, 10, 9, 1],
[19, 17, 13, 10],
[ 4, 19, 16, 17],
[ 2, 12, 8, 14]]]))
"""
return ivy.flatten(
self._data,
copy=copy,
start_dim=start_dim,
end_dim=end_dim,
order=order,
out=out,
)
def pad(
self: ivy.Array,
pad_width: Union[Iterable[Tuple[int]], int],
/,
*,
mode: Union[
Literal[
"constant",
"dilated",
"edge",
"linear_ramp",
"maximum",
"mean",
"median",
"minimum",
"reflect",
"symmetric",
"wrap",
"empty",
],
Callable,
] = "constant",
stat_length: Union[Iterable[Tuple[int]], int] = 1,
constant_values: Union[Iterable[Tuple[Number]], Number] = 0,
end_values: Union[Iterable[Tuple[Number]], Number] = 0,
reflect_type: Literal["even", "odd"] = "even",
out: Optional[ivy.Array] = None,
**kwargs: Optional[Any],
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.pad.
This method simply wraps the function, and so the docstring for
ivy.pad also applies to this method with minimal changes.
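
        Examples
        --------
        A small illustrative example (numpy-style ``pad_width``, constant mode):

        >>> x = ivy.array([1, 2, 3])
        >>> x.pad((1, 1), mode="constant", constant_values=0)
        ivy.array([0, 1, 2, 3, 0])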
"""
return ivy.pad(
self._data,
pad_width,
mode=mode,
stat_length=stat_length,
constant_values=constant_values,
end_values=end_values,
reflect_type=reflect_type,
out=out,
**kwargs,
)
@handle_view
def vsplit(
self: ivy.Array,
indices_or_sections: Union[int, Sequence[int], ivy.Array],
/,
*,
copy: Optional[bool] = None,
) -> List[ivy.Array]:
"""ivy.Array instance method variant of ivy.vsplit. This method simply
wraps the function, and so the docstring for ivy.vsplit also applies to
this method with minimal changes.
Parameters
----------
self
Input array.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning
a view of the input array.
indices_or_sections
If indices_or_sections is an integer n, the array is split into n
equal sections, provided that n must be a divisor of the split axis.
If indices_or_sections is a sequence of ints or 1-D array,
then input is split at each of the indices.
Returns
-------
ret
input array split vertically.
Examples
--------
>>> ary = ivy.array(
[[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]]
)
>>> ary.vsplit(2)
        [ivy.array([[[0., 1.], [2., 3.]]]), ivy.array([[[4., 5.], [6., 7.]]])]
"""
return ivy.vsplit(self._data, indices_or_sections, copy=copy)
@handle_view
def dsplit(
self: ivy.Array,
indices_or_sections: Union[int, Sequence[int], ivy.Array],
/,
*,
copy: Optional[bool] = None,
) -> List[ivy.Array]:
"""ivy.Array instance method variant of ivy.dsplit. This method simply
wraps the function, and so the docstring for ivy.dsplit also applies to
this method with minimal changes.
Parameters
----------
self
Input array.
indices_or_sections
If indices_or_sections is an integer n, the array is split into n
equal sections, provided that n must be a divisor of the split axis.
If indices_or_sections is a sequence of ints or 1-D array,
then input is split at each of the indices.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning
a view of the input array.
Returns
-------
ret
input array split along the 3rd axis.
Examples
--------
>>> ary = ivy.array(
[[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]],
[[ 8., 9., 10., 11.],
[12., 13., 14., 15.]]]
)
>>> ary.dsplit(2)
[ivy.array([[[ 0., 1.], [ 4., 5.]], [[ 8., 9.], [12., 13.]]]),
ivy.array([[[ 2., 3.], [ 6., 7.]], [[10., 11.], [14., 15.]]])]
"""
return ivy.dsplit(self._data, indices_or_sections, copy=copy)
@handle_view
def atleast_1d(
self: ivy.Array,
*arys: Union[ivy.Array, bool, Number],
copy: Optional[bool] = None,
) -> List[ivy.Array]:
"""ivy.Array instance method variant of ivy.atleast_1d. This method
simply wraps the function, and so the docstring for ivy.atleast_1d also
applies to this method with minimal changes.
Parameters
----------
self
Input array. Cannot be a scalar input.
arys
An arbitrary number of input arrays.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning
a view of the input array.
Returns
-------
ret
List of arrays, each with a.ndim >= 1. Copies are made
only if necessary.
Examples
--------
>>> a1 = ivy.array([[1,2,3]])
>>> a2 = ivy.array(4)
>>> a1.atleast_1d(a2,5,6)
[ivy.array([[1, 2, 3]]), ivy.array([4]), ivy.array([5]), ivy.array([6])]
"""
return ivy.atleast_1d(self._data, *arys, copy=copy)
def dstack(
self: ivy.Array,
arrays: Union[
Tuple[Union[ivy.Array, ivy.NativeArray]],
List[Union[ivy.Array, ivy.NativeArray]],
],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.dstack. This method simply
wraps the function, and so the docstring for ivy.dstack also applies to
this method with minimal changes.
Examples
--------
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([2, 3, 4])
>>> x.dstack(y)
ivy.array([[[1, 2],
[2, 3],
[3, 4]]])
"""
if not isinstance(arrays, (list, tuple)):
arrays = [arrays]
if isinstance(arrays, tuple):
x = (self._data,) + arrays
else:
x = [self._data] + arrays
return ivy.dstack(x, out=out)
@handle_view
def atleast_2d(
self: ivy.Array,
*arys: ivy.Array,
copy: Optional[bool] = None,
) -> List[ivy.Array]:
"""ivy.Array instance method variant of ivy.atleast_2d. This method
simply wraps the function, and so the docstring for ivy.atleast_2d also
applies to this method with minimal changes.
Parameters
----------
self
Input array. Cannot be a scalar input.
arys
An arbitrary number of input arrays.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning
a view of the input array.
Returns
-------
ret
List of arrays, each with a.ndim >= 2. Copies are made
only if necessary.
Examples
--------
>>> a1 = ivy.array([[1,2,3]])
>>> a2 = ivy.array(4)
>>> a1.atleast_2d(a2,5,6)
[ivy.array([[1, 2, 3]]), ivy.array([[4]]), ivy.array([[5]]), ivy.array([[6]])]
"""
return ivy.atleast_2d(self._data, *arys, copy=copy)
@handle_view
def atleast_3d(
self: ivy.Array,
*arys: Union[ivy.Array, bool, Number],
copy: Optional[bool] = None,
) -> List[ivy.Array]:
"""ivy.Array instance method variant of ivy.atleast_3d. This method
simply wraps the function, and so the docstring for ivy.atleast_3d also
applies to this method with minimal changes.
Parameters
----------
self
Input array. Cannot be a scalar input.
arys
An arbitrary number of input arrays.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning
a view of the input array.
Returns
-------
ret
List of arrays, each with a.ndim >= 3. Copies are made only if necessary
and views with three or more dimensions are returned. For example, a 1-D
array of shape (N,) becomes a view of shape (1, N, 1), and a 2-D array
of shape (M, N) becomes a view of shape (M, N, 1).
Examples
--------
>>> a1 = ivy.array([[1,2,3]])
>>> a2 = ivy.array([4,8])
>>> a1.atleast_3d(a2,5,6)
[ivy.array([[[1],
[2],
[3]]]), ivy.array([[[4],
[8]]]), ivy.array([[[5]]]), ivy.array([[[6]]])]
"""
return ivy.atleast_3d(self._data, *arys, copy=copy)
def take_along_axis(
self: ivy.Array,
indices: ivy.Array,
axis: int,
/,
*,
mode: str = "fill",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.take_along_axis. This
method simply wraps the function, and so the docstring for
ivy.take_along_axis also applies to this method with minimal changes.
Parameters
----------
self
The source array.
indices
The indices of the values to extract.
axis
The axis over which to select values.
mode
One of: 'clip', 'fill', 'drop'. Parameter controlling how out-of-bounds
indices will be handled.
out
Optional output, for writing the result to.
Returns
-------
ret
The returned array has the same shape as indices.
Examples
--------
>>> arr = ivy.array([[4, 3, 5], [1, 2, 1]])
>>> indices = ivy.array([[0, 1, 1], [2, 0, 0]])
>>> y = arr.take_along_axis(indices, 1)
>>> print(y)
ivy.array([[4, 3, 3], [1, 1, 1]])
"""
return ivy.take_along_axis(self._data, indices, axis, mode=mode, out=out)
@handle_view
def hsplit(
self: ivy.Array,
indices_or_sections: Union[int, Tuple[int, ...]],
/,
*,
copy: Optional[bool] = None,
) -> List[ivy.Array]:
"""ivy.Array instance method variant of ivy.hsplit. This method simply
wraps the function, and so the docstring for ivy.hsplit also applies to
this method with minimal changes.
Parameters
----------
self
Input array.
indices_or_sections
If indices_or_sections is an integer n, the array is split into n
equal sections, provided that n evenly divides the size of the split axis.
If indices_or_sections is a sequence of ints or 1-D array,
then input is split at each of the indices.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning
a view of the input array.
Returns
-------
ret
list of arrays split horizontally from input array.
Examples
--------
>>> ary = ivy.array(
[[0., 1., 2., 3.],
[4., 5., 6, 7.],
[8., 9., 10., 11.],
[12., 13., 14., 15.]]
)
>>> ary.hsplit(2)
[ivy.array([[ 0., 1.],
[ 4., 5.],
[ 8., 9.],
[12., 13.]]),
ivy.array([[ 2., 3.],
[ 6., 7.],
[10., 11.],
[14., 15.]])]
"""
return ivy.hsplit(self._data, indices_or_sections, copy=copy)
@handle_view
def expand(
self: ivy.Array,
shape: Union[ivy.Shape, ivy.NativeShape],
/,
*,
copy: Optional[bool] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Broadcast the input Array following the given shape and the
broadcast rule.
Parameters
----------
self
Array input.
shape
the target shape to expand the input to,
following the broadcast rules.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy.
In case copy is False we avoid copying by returning
a view of the input array.
out
optional output array, for writing the result to.
Returns
-------
ret
Output Array
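Examples
--------
A minimal illustrative sketch; the exact output formatting
may vary by backend:
>>> x = ivy.array([[1], [2]])
>>> x.expand((2, 3))
ivy.array([[1, 1, 1],
           [2, 2, 2]])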
"""
return ivy.expand(self._data, shape, copy=copy, out=out)
def as_strided(
self: ivy.Array,
shape: Union[ivy.Shape, ivy.NativeShape, Sequence[int]],
strides: Sequence[int],
/,
) -> ivy.Array:
"""Create a copy of the input array with the given shape and strides.
Parameters
----------
self
Input Array.
shape
The shape of the new array.
strides
The strides of the new array (specified in bytes).
Returns
-------
ret
Output Array
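Examples
--------
A minimal illustrative sketch; strides are given in bytes, and this
example assumes 4-byte ``int32`` elements:
>>> x = ivy.array([1, 2, 3, 4, 5, 6], dtype=ivy.int32)
>>> x.as_strided((3, 2), (8, 4))
ivy.array([[1, 2],
           [3, 4],
           [5, 6]])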
"""
return ivy.as_strided(self._data, shape, strides)
@handle_view
def concat_from_sequence(
self: ivy.Array,
/,
input_sequence: Union[
Tuple[Union[ivy.Array, ivy.NativeArray]],
List[Union[ivy.Array, ivy.NativeArray]],
],
*,
new_axis: int = 0,
axis: int = 0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Concatenate a sequence of arrays along a new or an existing axis.
Parameters
----------
self
Array input.
input_sequence
A sequence of arrays.
new_axis
Insert and concatenate on a new axis or not,
default 0 means do not insert new axis.
new_axis = 0: concatenate
new_axis = 1: stack
axis
The axis along which the arrays will be concatenated.
out
Optional output array, for writing the result to.
Returns
-------
ret
Output Array
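Examples
--------
A minimal illustrative sketch; ``new_axis=0`` concatenates while
``new_axis=1`` stacks:
>>> x = ivy.array([1, 2])
>>> x.concat_from_sequence([ivy.array([3, 4])], new_axis=0)
ivy.array([1, 2, 3, 4])
>>> x.concat_from_sequence([ivy.array([3, 4])], new_axis=1)
ivy.array([[1, 2],
           [3, 4]])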
"""
if not isinstance(input_sequence, (tuple, list)):
    input_sequence = [input_sequence]
if isinstance(input_sequence, tuple):
    input_sequence = (self._data,) + input_sequence
else:
    input_sequence = [self._data] + input_sequence
return ivy.concat_from_sequence(
    input_sequence, new_axis=new_axis, axis=axis, out=out
)
@handle_view
def associative_scan(
self: ivy.Array,
fn: Callable,
/,
*,
reverse: bool = False,
axis: int = 0,
) -> ivy.Array:
"""Perform an associative scan over the given array.
Parameters
----------
self
The array to scan over.
fn
The associative function to apply.
reverse
Whether to scan in reverse with respect to the given axis.
axis
The axis to scan over.
Returns
-------
ret
The result of the scan.
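Examples
--------
A minimal sketch using ``ivy.add`` as the associative function,
which is equivalent to a cumulative sum:
>>> x = ivy.array([1., 2., 3., 4.])
>>> x.associative_scan(ivy.add)
ivy.array([1., 3., 6., 10.])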
"""
return ivy.associative_scan(self._data, fn, reverse=reverse, axis=axis)
def unique_consecutive(
self: ivy.Array,
/,
*,
axis: Optional[int] = None,
) -> Tuple[ivy.Array, ivy.Array, ivy.Array]:
"""ivy.Array instance method variant of ivy.unique_consecutive.
This method simply wraps the function, and so the docstring for
ivy.unique_consecutive also applies to this method with minimal
changes.
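Examples
--------
A minimal illustrative sketch; the function returns the collapsed
values together with inverse indices and counts:
>>> x = ivy.array([1, 1, 2, 2, 3, 1, 1, 2])
>>> output, inverse_indices, counts = x.unique_consecutive()
>>> print(output)
ivy.array([1, 2, 3, 1, 2])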
"""
return ivy.unique_consecutive(self._data, axis=axis)
def fill_diagonal(
self: ivy.Array,
v: Union[int, float],
/,
*,
wrap: bool = False,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.fill_diag.
This method simply wraps the function, and so the docstring for
ivy.fill_diag also applies to this method with minimal changes.
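Examples
--------
A minimal illustrative sketch:
>>> x = ivy.zeros((3, 3))
>>> x.fill_diagonal(2)
ivy.array([[2., 0., 0.],
           [0., 2., 0.],
           [0., 0., 2.]])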
"""
return ivy.fill_diagonal(self._data, v, wrap=wrap)
def take(
self: ivy.Array,
indices: Union[int, ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[int] = None,
mode: str = "fill",
fill_value: Optional[Number] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.take.
This method simply wraps the function, and so the docstring for
ivy.take also applies to this method with minimal changes.
Parameters
----------
self
input array
indices
array indices. Must have an integer data type.
axis
axis over which to select values. If `axis` is negative,
the function must determine the axis along which to select values
by counting from the last dimension.
By default, the flattened input array is used.
mode
specifies how out-of-bounds `indices` will behave.
- 'raise' – raise an error
- 'wrap' – wrap around
- 'clip' – clip to the range (all indices that are too large are
replaced by the index that addresses the last element along that axis.
Note that this disables indexing with negative numbers.)
- 'fill' (default) – returns invalid values (e.g. NaN)
for out-of-bounds indices (see also fill_value below)
fill_value
fill value to return for out-of-bounds slices
(Defaults to NaN for inexact types,
the largest negative value for signed types,
the largest positive value for unsigned types, and True for booleans.)
out
optional output array, for writing the result to. It must
have a shape that the inputs broadcast to.
Returns
-------
ret
an array having the same data type as `x`.
The output array must have the same rank
(i.e., number of dimensions) as `x` and
must have the same shape as `x`, except
for the axis specified by `axis`
whose size must equal the number of elements in `indices`.
Examples
--------
With `ivy.Array` input:
>>> x = ivy.array([4,5,6])
>>> indices = ivy.array([2,1,0])
>>> y = x.take(indices)
>>> print(y)
ivy.array([6, 5, 4])
>>> x = ivy.array([4.7,5.2,6.5])
>>> indices = ivy.array([[0,1]])
>>> y = ivy.zeros_like(indices, dtype=x.dtype)
>>> x.take(indices, out=y)
>>> print(y)
ivy.array([[4.7, 5.2]])
>>> x = ivy.array([False, False, True])
>>> indices = ivy.array([[4,3,2]])
>>> y = ivy.zeros_like(indices, dtype=x.dtype)
>>> x.take(indices, out=y, mode="wrap")
>>> print(y)
ivy.array([[False, False, True]])
"""
return ivy.take(
self, indices, axis=axis, mode=mode, fill_value=fill_value, out=out
)
def unflatten(
self: ivy.Array,
/,
shape: Union[Tuple[int], ivy.Array, ivy.NativeArray],
dim: Optional[int] = 0,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.unflatten. This method
simply wraps the function, and so the docstring for ivy.unflatten also
applies to this method with minimal changes.
Parameters
----------
self
input array
shape
the shape into which dimension `dim` is unflattened. The product
of its entries must equal the size of dimension `dim`.
dim
the dimension to unflatten. If `dim` is negative, the function
counts from the last dimension. Default is ``0``.
out
optional output array, for writing the result to. It must
have a shape that the inputs broadcast to.
Returns
-------
ret
an array having the same data type as `x`.
The output array must have the same rank
(i.e., number of dimensions) as `x` and
must have the same shape as `x`,
except for the axis specified by `dim`
which is replaced with a tuple specified in `shape`.
Examples
--------
With 'ivy.Array' input:
>>> x = ivy.array([[1.2, 2.3, 3.4, 4.5],
... [5.6, 6.7, 7.8, 8.9]])
>>> dim = 1
>>> shape = (2, 2)
>>> y = ivy.zeros([2, 2, 2])
>>> x.unflatten(shape=shape, dim=dim, out=y)
>>> print(y)
ivy.array([[[1.2, 2.3], [3.4, 4.5]], [[5.6, 6.7], [7.8, 8.9]]])
"""
return ivy.unflatten(
self._data,
shape=shape,
dim=dim,
out=out,
)
def trim_zeros(
self: ivy.Array,
/,
*,
trim: Optional[str] = "fb",
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.trim_zeros.
This method simply wraps the function, and so the docstring for
ivy.trim_zeros also applies to this method with minimal changes.
Parameters
----------
self : 1-D array
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
1-D array
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = ivy.array([0, 0, 0, 0, 8, 3, 0, 0, 7, 1, 0])
>>> ivy.trim_zeros(a)
array([8, 3, 0, 0, 7, 1])
>>> ivy.trim_zeros(a, 'b')
array([0, 0, 0, 0, 8, 3, 0, 0, 7, 1])
>>> ivy.trim_zeros([0, 8, 3, 0, 0])
[8, 3]
"""
return ivy.trim_zeros(self, trim=trim)
def unfold(
self: Union[ivy.Array, ivy.NativeArray],
/,
mode: Optional[int] = 0,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.unfold. This method simply
wraps the function, and so the docstring for ivy.unfold also applies to
this method with minimal changes.
Parameters
----------
self
input tensor to be unfolded
mode
indexing starts at 0, therefore mode is in ``range(0, tensor.ndim)``
out
optional output array, for writing the result to.
Returns
-------
ret
unfolded_tensor of shape ``(tensor.shape[mode], -1)``
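Examples
--------
A minimal illustrative sketch:
>>> x = ivy.reshape(ivy.arange(8), (2, 2, 2))
>>> x.unfold(0)
ivy.array([[0, 1, 2, 3],
           [4, 5, 6, 7]])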
"""
return ivy.unfold(self._data, mode, out=out)
def fold(
self: Union[ivy.Array, ivy.NativeArray],
/,
mode: int,
shape: Union[ivy.Shape, ivy.NativeShape, Sequence[int]],
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.fold. This method simply
wraps the function, and so the docstring for ivy.fold also applies to
this method with minimal changes.
Parameters
----------
input
unfolded tensor of shape ``(shape[mode], -1)``
mode
the mode of the unfolding
shape
shape of the original tensor before unfolding
out
optional output array, for writing the result to.
Returns
-------
ret
folded_tensor of shape `shape`
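Examples
--------
A minimal illustrative sketch, inverting a mode-0 unfolding:
>>> x = ivy.array([[0, 1, 2, 3], [4, 5, 6, 7]])
>>> x.fold(0, (2, 2, 2))
ivy.array([[[0, 1],
            [2, 3]],
           [[4, 5],
            [6, 7]]])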
"""
return ivy.fold(self._data, mode, shape, out=out)
def partial_unfold(
self: Union[ivy.Array, ivy.NativeArray],
/,
mode: Optional[int] = 0,
skip_begin: Optional[int] = 1,
skip_end: Optional[int] = 0,
ravel_tensors: Optional[bool] = False,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.partial_unfold. This method
simply wraps the function, and so the docstring for ivy.partial_unfold
also applies to this method with minimal changes.
Parameters
----------
self
tensor of shape n_samples x n_1 x n_2 x ... x n_i
mode
indexing starts at 0, therefore mode is in range(0, tensor.ndim)
skip_begin
number of dimensions to leave untouched at the beginning
skip_end
number of dimensions to leave untouched at the end
ravel_tensors
if True, the unfolded tensors are also flattened
out
optional output array, for writing the result to.
Returns
-------
ret
partially unfolded tensor
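Examples
--------
A minimal shape-only sketch, assuming tensorly-style semantics where
the leading sample dimension is left untouched:
>>> x = ivy.zeros((2, 3, 4))
>>> y = x.partial_unfold(1)
>>> # y.shape is (2, 4, 3)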
"""
return ivy.partial_unfold(
self._data,
mode=mode,
skip_begin=skip_begin,
skip_end=skip_end,
ravel_tensors=ravel_tensors,
out=out,
)
def partial_fold(
self: Union[ivy.Array, ivy.NativeArray],
/,
mode: int,
shape: Union[ivy.Shape, ivy.NativeShape, Sequence[int]],
skip_begin: Optional[int] = 1,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.partial_fold. This method
simply wraps the function, and so the docstring for ivy.partial_fold
also applies to this method with minimal changes.
Parameters
----------
x
a partially unfolded tensor
mode
indexing starts at 0, therefore mode is in range(0, tensor.ndim)
shape
the shape of the original full tensor (including skipped dimensions)
skip_begin
number of dimensions left untouched at the beginning
out
optional output array, for writing the result to.
Returns
-------
partially re-folded tensor
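Examples
--------
A minimal shape-only sketch, inverting a matching partial_unfold:
>>> y = ivy.zeros((2, 4, 3))
>>> x = y.partial_fold(1, (2, 3, 4))
>>> # x.shape is (2, 3, 4)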
"""
return ivy.partial_fold(self._data, mode, shape, skip_begin, out=out)
def partial_tensor_to_vec(
self: Union[ivy.Array, ivy.NativeArray],
/,
skip_begin: Optional[int] = 1,
skip_end: Optional[int] = 0,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.partial_tensor_to_vec. This
method simply wraps the function, and so the docstring for
ivy.partial_tensor_to_vec also applies to this method with minimal
changes.
Parameters
----------
x
tensor to partially vectorise
skip_begin
number of dimensions to leave untouched at the beginning
skip_end
number of dimensions to leave untouched at the end
out
optional output array, for writing the result to.
Returns
-------
partially vectorised tensor with the
`skip_begin` first and `skip_end` last dimensions untouched
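Examples
--------
A minimal shape-only sketch with the default ``skip_begin=1``:
>>> x = ivy.zeros((2, 3, 4))
>>> v = x.partial_tensor_to_vec()
>>> # v.shape is (2, 12): each sample is vectorised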
"""
return ivy.partial_tensor_to_vec(self._data, skip_begin, skip_end, out=out)
def partial_vec_to_tensor(
self: Union[ivy.Array, ivy.NativeArray],
/,
shape: Union[ivy.Shape, ivy.NativeShape, Sequence[int]],
skip_begin: Optional[int] = 1,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.partial_vec_to_tensor. This
method simply wraps the function, and so the docstring for
ivy.partial_vec_to_tensor also applies to this method with minimal
changes.
Parameters
----------
x
a partially vectorised tensor
shape
the shape of the original full tensor (including skipped dimensions)
skip_begin
number of dimensions to leave untouched at the beginning
out
optional output array, for writing the result to.
Returns
-------
ret
full tensor
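Examples
--------
A minimal shape-only sketch, inverting a matching partial_tensor_to_vec:
>>> v = ivy.zeros((2, 12))
>>> x = v.partial_vec_to_tensor((2, 3, 4))
>>> # x.shape is (2, 3, 4)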
"""
return ivy.partial_vec_to_tensor(self._data, shape, skip_begin, out=out)
def matricize(
self: Union[ivy.Array, ivy.NativeArray],
/,
row_modes: Sequence[int],
column_modes: Optional[Sequence[int]] = None,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.matricize. This method
simply wraps the function, and so the docstring for ivy.matricize also
applies to this method with minimal changes.
Parameters
----------
self
the input tensor
row_modes
modes to use as row of the matrix (in the desired order)
column_modes
modes to use as column of the matrix, in the desired order
if None, the modes not in `row_modes` will be used in ascending order
out
optional output array, for writing the result to.
Returns
-------
ret
ivy.Array : tensor of size (ivy.prod(x.shape[i] for i in row_modes), -1)
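Examples
--------
A minimal shape-only sketch:
>>> x = ivy.zeros((2, 3, 4))
>>> m = x.matricize([0])
>>> # m.shape is (2, 12); modes 1 and 2 form the columns
>>> m = x.matricize([0, 2])
>>> # m.shape is (8, 3)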
"""
return ivy.matricize(self._data, row_modes, column_modes, out=out)
def soft_thresholding(
self: Union[ivy.Array, ivy.NativeArray],
/,
threshold: Union[float, ivy.Array, ivy.NativeArray],
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.soft_thresholding. This
method simply wraps the function, and so the docstring for
ivy.soft_thresholding also applies to this method with minimal changes.
Parameters
----------
x
input array
threshold
float or array with shape tensor.shape
* If float, the threshold is applied to the whole tensor
* If array, one threshold is applied per element; 0 values are ignored
out
optional output array, for writing the result to.
Returns
-------
ivy.Array
thresholded tensor on which the operator has been applied
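Examples
--------
A minimal illustrative sketch: each element is shrunk towards zero
by the threshold, i.e. sign(x) * max(|x| - threshold, 0):
>>> x = ivy.array([-2., -0.5, 0.5, 2.])
>>> x.soft_thresholding(1.)
ivy.array([-1., 0., 0., 1.])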
"""
return ivy.soft_thresholding(self._data, threshold, out=out)
def column_stack(
self: ivy.Array,
arrays: Sequence[Union[ivy.Array, ivy.NativeArray]],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.column_stack.
This method simply wraps the function, and so the docstring for
ivy.column_stack also applies to this method with minimal
changes.
Parameters
----------
self
Array that will be stacked at the beginning of the provided array iterable.
arrays
Arrays to be stacked.
out
Output array.
Returns
-------
ret
Stacked input.
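Examples
--------
A minimal illustrative sketch:
>>> x = ivy.array([1, 2, 3])
>>> x.column_stack([ivy.array([4, 5, 6])])
ivy.array([[1, 4],
           [2, 5],
           [3, 6]])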
"""
if not isinstance(arrays, (list, tuple)):
arrays = [arrays]
if isinstance(arrays, tuple):
x = (self._data,) + arrays
else:
x = [self._data] + arrays
return ivy.column_stack(x, out=out)
def put_along_axis(
self: ivy.Array,
indices: ivy.Array,
values: ivy.Array,
axis: int,
/,
*,
mode: Literal["sum", "min", "max", "mul", "mean", "replace"] = "replace",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.put_along_axis.
This method simply wraps the function, and so the docstring for
ivy.put_along_axis also applies to this method with minimal
changes.
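Examples
--------
A minimal illustrative sketch using the default ``"replace"`` mode:
>>> arr = ivy.array([[10, 30, 20], [60, 40, 50]])
>>> indices = ivy.array([[0], [1]])
>>> values = ivy.array([[99], [99]])
>>> y = arr.put_along_axis(indices, values, 1)
>>> print(y)
ivy.array([[99, 30, 20],
           [60, 99, 50]])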
"""
return ivy.put_along_axis(self._data, indices, values, axis, mode=mode, out=out)
| ivy/ivy/data_classes/array/experimental/manipulation.py/0 | {
"file_path": "ivy/ivy/data_classes/array/experimental/manipulation.py",
"repo_id": "ivy",
"token_count": 23636
} | 8 |
# global
import abc
from typing import Optional, Union
# local
import ivy
class _ArrayWithRandom(abc.ABC):
def random_uniform(
self: ivy.Array,
/,
*,
high: Union[float, ivy.Array, ivy.NativeArray] = 1.0,
shape: Optional[Union[ivy.Array, ivy.Shape, ivy.NativeShape]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
seed: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.random_uniform. This method
simply wraps the function, and so the docstring for ivy.random_uniform
also applies to this method with minimal changes.
Parameters
----------
self
Lower boundary of the output interval. All values generated will be
greater than or equal to ``low``. If array, must have same shape as
``high``.
high
Upper boundary of the output interval. All the values generated will be
less than ``high``. If array, must have same shape as ``low``.
shape
If the given shape is, e.g. ``(m, n, k)``, then ``m * n * k`` samples
are drawn. Can only be specified when ``low`` and ``high`` are numeric
values, else exception will be raised.
Default is ``None``, where a single value is returned.
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
(Default value = None).
dtype
output array data type. If ``dtype`` is ``None``, the output array data
type will be the default floating-point data type. Default ``None``
seed
A python integer. Used to create a random seed distribution
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Drawn samples from the parameterized uniform distribution.
Examples
--------
>>> x = ivy.array([[9.8, 3.4], [5.8, 7.2]])
>>> x.random_uniform(high=10.2)
ivy.array([[9.86, 4.89],
[7.06, 7.47]])
>>> x.random_uniform(high=10.2, device='cpu')
ivy.array([[9.86, 4.89],
[7.06, 7.47]])
>>> x.random_uniform(high=14.2, dtype='float16')
ivy.array([[9.86, 4.89],
[7.06, 7.47]])
>>> x.random_uniform(high=10.8, device='cpu', dtype='float64')
ivy.array([[9.86, 4.89],
[7.06, 7.47]])
>>> z = ivy.ones((2,2))
>>> x.random_uniform(high=11.2, device='cpu', dtype='float64', out=z)
ivy.array([[10.1 , 6.53],
[ 7.94, 8.85]])
>>> x = ivy.array([8.7, 9.3])
>>> y = ivy.array([12.8, 14.5])
>>> x.random_uniform(high=y)
ivy.array([12.1, 14. ])
>>> x.random_uniform(high=y, device='cpu')
ivy.array([12.1, 14. ])
>>> x.random_uniform(high=y, dtype='float16')
ivy.array([12.1, 14. ])
>>> x.random_uniform(high=y, device='cpu', dtype='float64')
ivy.array([12.1, 14. ])
>>> z = ivy.ones((2,))
>>> x.random_uniform(high=y, device='cpu', dtype='float64', out=z)
ivy.array([12.1, 14. ])
"""
return ivy.random_uniform(
low=self._data,
high=high,
shape=shape,
device=device,
dtype=dtype,
seed=seed,
out=out,
)
def random_normal(
self: ivy.Array,
/,
*,
std: Union[float, ivy.Array, ivy.NativeArray] = 1.0,
shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
seed: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.random_normal. This method
simply wraps the function, and so the docstring for ivy.random_normal
also applies to this method with minimal changes.
Parameters
----------
self
The mean of the normal distribution to sample from. Default is ``0.0``.
std
The standard deviation of the normal distribution to sample from.
Must be non-negative. Default is ``1.0``.
shape
If the given shape is, e.g. ``(m, n, k)``, then ``m * n * k`` samples
are drawn. Can only be specified when ``mean`` and ``std`` are numeric
values, else exception will be raised.
Default is ``None``, where a single value is returned.
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
(Default value = None).
dtype
output array data type. If ``dtype`` is ``None``, the output array data
type will be the default floating-point data type. Default ``None``
seed
A python integer. Used to create a random seed distribution
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Drawn samples from the parameterized normal distribution.
Examples
--------
>>> x = ivy.array([[9.8, 3.4], [5.8, 7.2]])
>>> x.random_normal(std=10.2)
ivy.array([[19. , -6.44 ],
[ 5.72 , 0.235]])
>>> x.random_normal(std=10.2, device='cpu')
ivy.array([[18.7 , 25.2 ],
[27.5 , -3.22]])
>>> x.random_normal(std=14.2, dtype='float16')
ivy.array([[26.6 , 12.1 ],
[ 4.56, 5.49]])
>>> x.random_normal(std=10.8, device='cpu', dtype='float64')
ivy.array([[ 1.02, -1.39],
[14.2 , -1. ]])
>>> z = ivy.ones((2,2))
>>> x.random_normal(std=11.2, device='cpu', dtype='float64', out=z)
ivy.array([[ 7.72, -8.32],
[ 4.95, 15.8 ]])
>>> x = ivy.array([8.7, 9.3])
>>> y = ivy.array([12.8, 14.5])
>>> x.random_normal(std=y)
ivy.array([-10.8, 12.1])
>>> x.random_normal(std=y, device='cpu')
ivy.array([ 13. , -26.9])
>>> x.random_normal(std=y, dtype='float16')
ivy.array([14.3 , -0.807])
>>> x.random_normal(std=y, device='cpu', dtype='float64')
ivy.array([21.3 , 3.85])
>>> z = ivy.ones((2,))
>>> x.random_normal(std=y, device='cpu', dtype='float64', out=z)
ivy.array([ 4.32, 42.2 ])
"""
return ivy.random_normal(
mean=self._data,
std=std,
shape=shape,
device=device,
dtype=dtype,
seed=seed,
out=out,
)
def multinomial(
self: ivy.Array,
population_size: int,
num_samples: int,
/,
*,
batch_size: int = 1,
replace: bool = True,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
seed: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.multinomial. This method
simply wraps the function, and so the docstring for ivy.multinomial
also applies to this method with minimal changes.
Parameters
----------
self
The unnormalized probabilities for all elements in population,
default is uniform *[batch_shape, population_size]*
population_size
The size of the population from which to draw samples.
num_samples
Number of independent samples to draw from the population.
batch_size
Number of tensors to generate. Default is 1.
replace
Whether to replace samples once they've been drawn. Default is ``True``.
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
(Default value = None)
seed
A python integer. Used to create a random seed distribution
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Drawn samples (indices into the population) from the multinomial
distribution.
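Examples
--------
A minimal illustrative sketch; the sampled indices vary with the seed:
>>> probs = ivy.array([[0.2, 0.3, 0.5]])
>>> draws = probs.multinomial(3, 4)
>>> # draws has shape (1, 4), holding indices into the population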
"""
return ivy.multinomial(
population_size,
num_samples,
batch_size=batch_size,
probs=self._data,
replace=replace,
device=device,
seed=seed,
out=out,
)
def randint(
self: ivy.Array,
high: Union[int, ivy.Array, ivy.NativeArray],
/,
*,
shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
seed: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.randint. This method simply
wraps the function, and so the docstring for ivy.randint also applies
to this method with minimal changes.
Parameters
----------
self
Lowest integer that can be drawn from the distribution.
high
One above the highest integer that can be drawn from the distribution.
shape
If the given shape is, e.g. ``(m, n, k)``, then ``m * n * k`` samples
are drawn. Can only be specified when ``low`` and ``high`` are numeric
values, else exception will be raised.
Default is ``None``, where a single value is returned.
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
(Default value = None).
dtype
output array data type. If ``dtype`` is ``None``, the output array data
type will be the default integer data type. Default ``None``
seed
A python integer. Used to create a random seed distribution
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Returns an array with the given shape filled with integers from
the uniform distribution in the “half-open” interval [low, high)
Examples
--------
>>> x = ivy.array([[1, 2], [0, 5]])
>>> x.randint(10)
ivy.array([[1, 5],
[9, 7]])
>>> x.randint(8, device='cpu')
ivy.array([[6, 5],
[0, 5]])
>>> x.randint(9, dtype='int8')
ivy.array([[1, 2],
[7, 7]])
>>> x.randint(14, device='cpu', dtype='int16')
ivy.array([[6, 5],
[0, 5]])
>>> z = ivy.ones((2,2))
>>> x.randint(16, device='cpu', dtype='int64', out=z)
ivy.array([[1, 2],
[7, 7]])
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([23, 25, 98])
>>> x.randint(y)
ivy.array([ 5, 14, 18])
>>> x.randint(y, device='cpu')
ivy.array([20, 13, 46])
>>> x.randint(y, dtype='int32')
ivy.array([ 9, 18, 33])
>>> x.randint(y, device='cpu', dtype='int16')
ivy.array([ 9, 20, 85])
>>> z = ivy.ones((3,))
>>> x.randint(y, device='cpu', dtype='int64', out=z)
ivy.array([20, 13, 46])
"""
return ivy.randint(
self._data,
high,
shape=shape,
device=device,
dtype=dtype,
seed=seed,
out=out,
)
def shuffle(
self: ivy.Array,
axis: Optional[int] = 0,
/,
*,
seed: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.shuffle. This method simply
wraps the function, and so the docstring for ivy.shuffle also applies
to this method with minimal changes.
Parameters
----------
self
Input array. Should have a numeric data type.
axis
The axis which x is shuffled along. Default is 0.
seed
A python integer. Used to create a random seed distribution
out
optional output array, for writing the result to. It must have a
shape that the inputs broadcast to.
Returns
-------
ret
An array object, shuffled along the first dimension.
Examples
--------
>>> x = ivy.array([5, 2, 9])
>>> y = x.shuffle()
>>> print(y)
ivy.array([2, 5, 9])
"""
return ivy.shuffle(self, axis, seed=seed, out=out)
| ivy/ivy/data_classes/array/random.py/0 | {
"file_path": "ivy/ivy/data_classes/array/random.py",
"repo_id": "ivy",
"token_count": 6435
} | 9 |
from .activations import _ContainerWithActivationExperimental
from .conversions import _ContainerWithConversionExperimental
from .creation import _ContainerWithCreationExperimental
from .data_type import _ContainerWithData_typeExperimental
from .device import _ContainerWithDeviceExperimental
from .elementwise import _ContainerWithElementWiseExperimental
from .general import _ContainerWithGeneralExperimental
from .gradients import _ContainerWithGradientsExperimental
from .image import _ContainerWithImageExperimental
from .layers import _ContainerWithLayersExperimental
from .linear_algebra import _ContainerWithLinearAlgebraExperimental
from .manipulation import _ContainerWithManipulationExperimental
from .norms import _ContainerWithNormsExperimental
from .random import _ContainerWithRandomExperimental
from .searching import _ContainerWithSearchingExperimental
from .set import _ContainerWithSetExperimental
from .sorting import _ContainerWithSortingExperimental
from .statistical import _ContainerWithStatisticalExperimental
from .utility import _ContainerWithUtilityExperimental
from .losses import _ContainerWithLossesExperimental
| ivy/ivy/data_classes/container/experimental/__init__.py/0 | {
"file_path": "ivy/ivy/data_classes/container/experimental/__init__.py",
"repo_id": "ivy",
"token_count": 259
} | 10 |
# global
from typing import Optional, Union, List, Dict, Tuple
# local
import ivy
from ivy.data_classes.container.base import ContainerBase
class _ContainerWithSearchingExperimental(ContainerBase):
@staticmethod
def static_unravel_index(
indices: ivy.Container,
shape: Union[Tuple[int], ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.unravel_index. This
method simply wraps the function, and so the docstring for
ivy.unravel_index also applies to this method with minimal changes.
Parameters
----------
indices
Input container including arrays.
shape
The shape of the array to use for unraveling indices.
out
optional output array, for writing the result to.
Returns
-------
ret
Container with tuples that have arrays with the same shape as
the arrays in the input container.
Examples
--------
With one :class:`ivy.Container` input:
>>> indices = ivy.Container(a=ivy.array([22, 41, 37]), b=ivy.array([30, 2]))
>>> ivy.Container.static_unravel_index(indices, (7,6))
{
    a: (ivy.array([3, 6, 6]), ivy.array([4, 5, 1])),
    b: (ivy.array([5, 0]), ivy.array([0, 2]))
}
"""
return ContainerBase.cont_multi_map_in_function(
"unravel_index",
indices,
shape,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def unravel_index(
self: ivy.Container,
shape: Union[Tuple[int], ivy.Container],
/,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.unravel_index. This
method simply wraps the function, and so the docstring for
ivy.unravel_index also applies to this method with minimal changes.
Parameters
----------
self
Input container including arrays.
shape
The shape of the array to use for unraveling indices.
out
optional output array, for writing the result to.
Returns
-------
ret
Container with tuples that have arrays with the same shape as
the arrays in the input container.
Examples
--------
With one :class:`ivy.Container` input:
>>> indices = ivy.Container(a=ivy.array([22, 41, 37]), b=ivy.array([30, 2]))
>>> indices.unravel_index((7, 6))
{
    a: (ivy.array([3, 6, 6]), ivy.array([4, 5, 1])),
    b: (ivy.array([5, 0]), ivy.array([0, 2]))
}
"""
return self.static_unravel_index(self, shape, out=out)
| ivy/ivy/data_classes/container/experimental/searching.py/0 | {
"file_path": "ivy/ivy/data_classes/container/experimental/searching.py",
"repo_id": "ivy",
"token_count": 1491
} | 11 |
# global
from typing import Optional, List, Union, Dict, Literal
# local
from ivy.data_classes.container.base import ContainerBase
import ivy
# ToDo: implement all methods here as public instance methods
# noinspection PyMissingConstructor
class _ContainerWithSorting(ContainerBase):
@staticmethod
def _static_argsort(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
axis: Union[int, ivy.Container] = -1,
descending: Union[bool, ivy.Container] = False,
stable: Union[bool, ivy.Container] = True,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.argsort. This method
simply wraps the function, and so the docstring for ivy.argsort also
applies to this method with minimal changes.
Parameters
----------
x
input array or container. Should have a numeric data type.
axis
axis along which to sort. If set to ``-1``, the function must sort
along the last axis. Default: ``-1``.
descending
sort order. If ``True``, the returned indices sort
``x`` in descending order (by value). If ``False``,
the returned indices sort ``x`` in ascending order
(by value). Default: ``False``.
stable
sort stability. If ``True``, the returned indices must maintain
the relative order of ``x`` values which compare as equal.
If ``False``, the returned indices may or may not maintain
the relative order of ``x`` values which compare as equal (i.e., the
relative order of ``x`` values which compare as equal
is implementation-dependent). Default: ``True``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the index values of sorted
array. The returned array must have a
data type determined by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([7, 2, 1]),
... b=ivy.array([3, 2]))
>>> y = ivy.Container.static_argsort(x, axis=-1, descending=True, stable=False)
>>> print(y)
{
a: ivy.array([0, 1, 2]),
b: ivy.array([0, 1])
}
>>> x = ivy.Container(a=ivy.array([7, 2, 1]),
... b=ivy.array([[3, 2], [7, 0.2]]))
>>> y = ivy.Container.static_argsort(x, axis=-1, descending=True, stable=False)
>>> print(y)
{
a: ivy.array([0, 1, 2]),
b: ivy.array([[0, 1], [0, 1]])
}
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([2, 5, 1]),
...                   b=ivy.array([[1, 5], [.2, .1]]))
>>> y = ivy.Container.static_argsort(x, axis=-1, descending=True, stable=False)
>>> print(y)
{
    a: ivy.array([1, 0, 2]),
    b: ivy.array([[1, 0], [0, 1]])
}
>>> x = ivy.Container(a=ivy.native_array([2, 5, 1]),
...                   b=ivy.array([[1, 5], [.2, .1]]))
>>> y = ivy.Container.static_argsort(x, axis=-1, descending=True, stable=False)
>>> print(y)
{
    a: ivy.array([1, 0, 2]),
    b: ivy.array([[1, 0], [0, 1]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"argsort",
x,
axis=axis,
descending=descending,
stable=stable,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def argsort(
self: ivy.Container,
/,
*,
axis: Union[int, ivy.Container] = -1,
descending: Union[bool, ivy.Container] = False,
stable: Union[bool, ivy.Container] = True,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.argsort. This method
simply wraps the function, and so the docstring for ivy.argsort also
applies to this method with minimal changes.
Parameters
----------
self
input array or container. Should have a numeric data type.
axis
axis along which to sort. If set to ``-1``, the function
must sort along the last axis. Default: ``-1``.
descending
sort order. If ``True``, the returned indices sort ``x``
in descending order (by value). If ``False``, the
returned indices sort ``x`` in ascending order (by value).
Default: ``False``.
stable
sort stability. If ``True``, the returned indices must
maintain the relative order of ``x`` values which compare
as equal. If ``False``, the returned indices may or may not
maintain the relative order of ``x`` values which compare
as equal (i.e., the relative order of ``x`` values which
compare as equal is implementation-dependent).
Default: ``True``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains,
otherwise key_chains will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
a container containing the index values of sorted array.
The returned array must have a data type determined
by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([7, 2, 1]),
... b=ivy.array([3, 2]))
>>> y = x.argsort(axis=-1, descending=True, stable=False)
>>> print(y)
{
a: ivy.array([0, 1, 2]),
b: ivy.array([0, 1])
}
"""
return self._static_argsort(
self,
axis=axis,
descending=descending,
stable=stable,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_sort(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
axis: Union[int, ivy.Container] = -1,
descending: Union[bool, ivy.Container] = False,
stable: Union[bool, ivy.Container] = True,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.sort. This method simply
wraps the function, and so the docstring for ivy.sort also applies to
this method with minimal changes.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([5, 9, 0.2]),
... b=ivy.array([[8, 1], [5, 0.8]]))
>>> y = ivy.Container.static_sort(x)
>>> print(y)
{
a: ivy.array([0.2, 5., 9.]),
b: ivy.array([[1., 8.], [0.8, 5.]])
}
>>> x = ivy.Container(a=ivy.array([8, 0.5, 6]),
... b=ivy.array([[9, 0.7], [0.4, 0]]))
>>> y = ivy.Container.static_sort(x)
>>> print(y)
{
a: ivy.array([0.5, 6., 8.]),
b: ivy.array([[0.7, 9.], [0., 0.4]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"sort",
x,
axis=axis,
descending=descending,
stable=stable,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def sort(
self: ivy.Container,
/,
*,
axis: Union[int, ivy.Container] = -1,
descending: Union[bool, ivy.Container] = False,
stable: Union[bool, ivy.Container] = True,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.sort. This method
simply wraps the function, and so the docstring for ivy.sort also
applies to this method with minimal changes.
Examples
--------
>>> x = ivy.Container(a=ivy.array([5, 9, 0.2]),
... b=ivy.array([8, 1]))
>>> y = x.sort()
>>> print(y)
{
a: ivy.array([0.2, 5., 9.]),
b: ivy.array([1, 8])
}
>>> x = ivy.Container(a=ivy.array([5, 9, 0.2]),
... b=ivy.array([[8, 1], [5, 0.8]]))
>>> y = x.sort()
>>> print(y)
{
a: ivy.array([0.2, 5., 9.]),
b: ivy.array([[1., 8.], [0.8, 5.]])
}
>>> x = ivy.Container(a=ivy.array([8, 0.5, 6]),
... b=ivy.array([[9, 0.7], [0.4, 0]]))
>>> y = ivy.sort(x)
>>> print(y)
{
a: ivy.array([0.5, 6., 8.]),
b: ivy.array([[0.7, 9.],[0., 0.4]])
}
>>> x = ivy.Container(a=ivy.native_array([8, 0.5, 6]),
... b=ivy.array([[9, 0.7], [0.4, 0]]))
>>> y = ivy.sort(x)
>>> print(y)
{
a: ivy.array([0.5, 6., 8.]),
b: ivy.array([[0.7, 9.],[0., 0.4]])
}
"""
return self._static_sort(
self,
axis=axis,
descending=descending,
stable=stable,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_msort(
a: Union[ivy.Array, ivy.NativeArray, ivy.Container, list, tuple],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.msort. This method simply
wraps the function, and so the docstring for ivy.msort also applies to
this method with minimal changes.
Parameters
----------
a
array-like or container input.
out
optional output container, for writing the result to.
Returns
-------
ret
a container containing sorted input arrays.
Examples
--------
With :class:`ivy.Container` input:
>>> a = ivy.Container(x = ivy.asarray([[8, 9, 6],[6, 2, 6]]),
...                   y = ivy.asarray([[7, 2],[3, 4]]))
>>> ivy.Container.static_msort(a)
{
    x: ivy.array(
        [[6, 2, 6],
         [8, 9, 6]]
    ),
    y: ivy.array(
        [[3, 2],
         [7, 4]]
    )
}
"""
return ContainerBase.cont_multi_map_in_function(
"msort",
a,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def msort(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.msort. This method
simply wraps the function, and so the docstring for ivy.msort also
applies to this method with minimal changes.
Parameters
----------
self
input container with array-like inputs to sort.
out
optional output container, for writing the result to.
Returns
-------
ret
a container containing the sorted input arrays.
Examples
--------
>>> a = ivy.Container(x = ivy.asarray([[8, 9, 6],[6, 2, 6]]),
...                   y = ivy.asarray([[7, 2],[3, 4]]))
>>> a.msort()
{
    x: ivy.array(
        [[6, 2, 6],
         [8, 9, 6]]
    ),
    y: ivy.array(
        [[3, 2],
         [7, 4]]
    )
}
"""
return self.static_msort(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_searchsorted(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
v: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
side: Union[str, ivy.Container] = "left",
sorter: Optional[
Union[ivy.Array, ivy.NativeArray, ivy.Container, List[int]]
] = None,
ret_dtype: Union[ivy.Dtype, ivy.Container] = ivy.int64,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.searchsorted.
This method simply wraps the function, and so the docstring for
ivy.searchsorted also applies to this method with minimal
changes.
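Examples
--------
A minimal illustrative sketch (note that this calls the private
static method defined in this class):
>>> x = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([0, 5, 10]))
>>> y = ivy.Container._static_searchsorted(x, ivy.array([2]))
>>> print(y)
{
    a: ivy.array([1]),
    b: ivy.array([1])
}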
"""
return ContainerBase.cont_multi_map_in_function(
"searchsorted",
x1,
v,
side=side,
sorter=sorter,
ret_dtype=ret_dtype,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def searchsorted(
self: ivy.Container,
v: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
side: Union[Literal["left", "right"], ivy.Container] = "left",
sorter: Optional[
Union[ivy.Array, ivy.NativeArray, List[int], ivy.Container]
] = None,
ret_dtype: Union[ivy.Dtype, ivy.NativeDtype, ivy.Container] = ivy.int64,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.searchsorted.
This method simply wraps the function, and so the docstring for
ivy.searchsorted also applies to this method with minimal
changes.
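Examples
--------
A minimal illustrative sketch:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([0, 5, 10]))
>>> y = x.searchsorted(ivy.array([5]), side="right")
>>> print(y)
{
    a: ivy.array([3]),
    b: ivy.array([2])
}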
"""
return self._static_searchsorted(
self,
v,
side=side,
sorter=sorter,
ret_dtype=ret_dtype,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
| ivy/ivy/data_classes/container/sorting.py/0 | {
"file_path": "ivy/ivy/data_classes/container/sorting.py",
"repo_id": "ivy",
"token_count": 8973
} | 12 |
[package]
name = "xlar"
version = "0.1.0"
edition = "2021"
[lib]
name = "xlar"
crate-type = ["cdylib"]
[dependencies]
thiserror = "1"
libc = "0.2"
num-traits = "0.2"
num-derive = "0.3"
zip = "0.6.4"
pyo3 = { version = "0.19.1", features = ["extension-module"] }
ndarray = "0.15.6"
numpy = "0.19.0"
half = "2.3.1"
[build-dependencies]
bindgen = "0.64"
cc = "1.0"
[dev-dependencies]
anyhow = "1.0"
clap = { version = "4.2.4", features = ["derive"] }
fancy-regex = "0.11.0"
rand = "0.8.5"
serde_json = "1.0.96"
| ivy/ivy/engines/XLA/rust_api/Cargo.toml/0 | {
"file_path": "ivy/ivy/engines/XLA/rust_api/Cargo.toml",
"repo_id": "ivy",
"token_count": 261
} | 13 |
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#pragma GCC diagnostic ignored "-Winvalid-offsetof"
#pragma GCC diagnostic ignored "-Wreturn-type"
#include "tensorflow/compiler/xla/client/client_library.h"
#include "tensorflow/compiler/xla/client/lib/constants.h"
#include "tensorflow/compiler/xla/client/lib/matrix.h"
#include "tensorflow/compiler/xla/client/lib/math.h"
#include "tensorflow/compiler/xla/client/padding.h"
#include "tensorflow/compiler/xla/client/xla_builder.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/pjrt/gpu/gpu_helpers.h"
#include "tensorflow/compiler/xla/pjrt/gpu/se_gpu_pjrt_client.h"
#include "tensorflow/compiler/xla/pjrt/pjrt_client.h"
#include "tensorflow/compiler/xla/pjrt/pjrt_stream_executor_client.h"
#include "tensorflow/compiler/xla/pjrt/tfrt_cpu_pjrt_client.h"
#include "tensorflow/compiler/xla/pjrt/tpu_client.h"
#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/statusor.h"
#pragma GCC diagnostic pop
using namespace xla;
extern "C" {
typedef std::shared_ptr<PjRtClient> *pjrt_client;
typedef PjRtLoadedExecutable *pjrt_loaded_executable;
typedef PjRtDevice *pjrt_device;
typedef PjRtBuffer *pjrt_buffer;
typedef XlaBuilder *xla_builder;
typedef XlaOp *xla_op;
typedef Status *status;
typedef Shape *shape;
typedef Literal *literal;
typedef XlaComputation *xla_computation;
typedef HloModule *hlo_module;
typedef HloModuleProto *hlo_module_proto;
typedef HloComputation *hlo_computation;
#else
typedef struct _pjrt_client *pjrt_client;
typedef struct _pjrt_loaded_executable *pjrt_loaded_executable;
typedef struct _pjrt_device *pjrt_device;
typedef struct _pjrt_buffer *pjrt_buffer;
typedef struct _xla_builder *xla_builder;
typedef struct _xla_op *xla_op;
typedef struct _status *status;
typedef struct _shape *shape;
typedef struct _literal *literal;
typedef struct _xla_computation *xla_computation;
typedef struct _hlo_module *hlo_module;
typedef struct _hlo_module_proto *hlo_module_proto;
typedef struct _hlo_computation *hlo_computation;
#endif
status pjrt_cpu_client_create(pjrt_client *);
status pjrt_gpu_client_create(pjrt_client *, double, bool);
status pjrt_tpu_client_create(pjrt_client *, int);
void pjrt_client_free(pjrt_client);
int pjrt_client_device_count(pjrt_client);
int pjrt_client_addressable_device_count(pjrt_client);
void pjrt_client_devices(pjrt_client, pjrt_device *);
void pjrt_client_addressable_devices(pjrt_client, pjrt_device *);
char *pjrt_client_platform_name(pjrt_client);
char *pjrt_client_platform_version(pjrt_client);
void pjrt_loaded_executable_free(pjrt_loaded_executable);
int pjrt_device_id(pjrt_device);
int pjrt_device_process_index(pjrt_device);
int pjrt_device_local_hardware_id(pjrt_device);
status pjrt_device_transfer_to_infeed(pjrt_device, const literal);
status pjrt_device_transfer_from_outfeed(pjrt_device, literal);
char *pjrt_device_kind(pjrt_device);
char *pjrt_device_debug_string(pjrt_device);
char *pjrt_device_to_string(pjrt_device);
status pjrt_buffer_from_host_literal(const pjrt_client, const pjrt_device,
const literal, pjrt_buffer *);
status pjrt_buffer_from_host_buffer(const pjrt_client, const pjrt_device,
const void *, int, int, const int64_t *,
pjrt_buffer *);
status pjrt_buffer_to_literal_sync(pjrt_buffer, literal *);
status pjrt_buffer_copy_raw_to_host_sync(pjrt_buffer, void *, size_t, size_t);
shape pjrt_buffer_on_device_shape(pjrt_buffer);
status pjrt_buffer_copy_to_device(pjrt_buffer, pjrt_device, pjrt_buffer *);
void pjrt_buffer_free(pjrt_buffer);
xla_builder xla_builder_create(const char *);
void xla_builder_free(xla_builder);
xla_op constant_literal(const xla_builder, const literal);
xla_op parameter(const xla_builder, int64_t, int, int, const int64_t *,
const char *);
xla_op parameter_s(const xla_builder, int64_t, const shape, const char *);
xla_op infeed(const xla_builder, int, int, const int64_t *, const char *);
void outfeed(const xla_op, int, int, const int64_t *, const char *);
// Ops
xla_op op_add(const xla_op, const xla_op);
xla_op op_sub(const xla_op, const xla_op);
xla_op op_mul(const xla_op, const xla_op);
xla_op op_div(const xla_op, const xla_op);
xla_op op_rem(const xla_op, const xla_op);
xla_op op_max(const xla_op, const xla_op);
xla_op op_min(const xla_op, const xla_op);
xla_op op_and(const xla_op, const xla_op);
xla_op op_or(const xla_op, const xla_op);
xla_op op_xor(const xla_op, const xla_op);
xla_op op_atan2(const xla_op, const xla_op);
xla_op op_pow(const xla_op, const xla_op);
xla_op op_dot(const xla_op, const xla_op);
xla_op op_dot_general(const xla_op, const xla_op, const int64_t *, size_t,
const int64_t *, size_t, const int64_t *, size_t,
const int64_t *, size_t);
xla_op op_eq(const xla_op, const xla_op);
xla_op op_ne(const xla_op, const xla_op);
xla_op op_ge(const xla_op, const xla_op);
xla_op op_gt(const xla_op, const xla_op);
xla_op op_le(const xla_op, const xla_op);
xla_op op_lt(const xla_op, const xla_op);
xla_op op_shift_left(const xla_op, const xla_op);
xla_op op_shift_right_arith(const xla_op, const xla_op);
xla_op op_shift_right_logic(const xla_op, const xla_op);
xla_op op_population_count(const xla_op);
xla_op op_not(const xla_op);
xla_op op_abs(const xla_op);
xla_op op_exp(const xla_op);
xla_op op_expm1(const xla_op);
xla_op op_floor(const xla_op);
xla_op op_ceil(const xla_op);
xla_op op_round(const xla_op);
xla_op op_round_nearest_even(const xla_op);
xla_op op_log(const xla_op);
xla_op op_log1p(const xla_op);
xla_op op_logistic(const xla_op);
xla_op op_sign(const xla_op);
xla_op op_clz(const xla_op);
xla_op op_cos(const xla_op);
xla_op op_sin(const xla_op);
xla_op op_tanh(const xla_op);
xla_op op_real(const xla_op);
xla_op op_imag(const xla_op);
xla_op op_conj(const xla_op);
xla_op op_square(const xla_op);
xla_op op_sqrt(const xla_op);
xla_op op_rsqrt(const xla_op);
xla_op op_cbrt(const xla_op);
xla_op op_is_finite(const xla_op);
xla_op op_neg(const xla_op);
xla_op op_lower_triangle(const xla_op);
xla_op op_upper_triangle(const xla_op);
xla_op op_erf(const xla_op);
xla_op op_einsum1(const xla_op, const char *);
xla_op op_einsum2(const xla_op, const xla_op, const char *);
xla_op op_copy(const xla_op);
xla_op op_clone(const xla_op);
xla_op op_zeros_like(const xla_op);
xla_op op_zero_like(const xla_op);
xla_op op_zero(const xla_builder, int);
xla_op op_one(const xla_builder, int);
xla_op op_min_value(const xla_builder, int);
xla_op op_max_value(const xla_builder, int);
xla_op op_reshape(const xla_op, size_t, const int64_t *);
xla_op op_dynamic_reshape(const xla_op, size_t, const xla_op *, size_t, const int64_t *, const bool *);
xla_op op_broadcast(const xla_op, size_t, const int64_t *);
xla_op op_broadcast_in_dim(const xla_op, size_t, const int64_t *, size_t,
const int64_t *);
xla_op op_collapse(const xla_op, size_t, const int64_t *);
xla_op op_transpose(const xla_op, size_t, const int64_t *);
xla_op op_clamp(const xla_op, const xla_op, const xla_op);
xla_op op_select(const xla_op, const xla_op, const xla_op);
xla_op op_call(const xla_builder, const xla_computation, size_t, const xla_op *);
xla_op op_map(const xla_builder, size_t, const xla_op *, const xla_computation, size_t, const int64_t *, size_t, const xla_op *);
xla_op op_rng_uniform(const xla_op, const xla_op, int, int, const int64_t *);
xla_op op_rng_normal(const xla_op, const xla_op, int, int, const int64_t *);
xla_op op_pad(const xla_op, const xla_op, size_t, const int64_t *, const int64_t *, const int64_t *);
xla_op op_pad_in_dim(const xla_op, const xla_op, int64_t, int64_t, int64_t);
xla_op op_slice(const xla_op, size_t, const int64_t *, size_t, const int64_t *, size_t, const int64_t *);
xla_op op_slice_in_dim(const xla_op, int64_t, int64_t, int64_t, int64_t);
xla_op op_dynamic_slice(const xla_op, size_t, const xla_op *, size_t, const int64_t *);
xla_op op_dynamic_update_slice(const xla_op, const xla_op, size_t, const xla_op *);
xla_op op_concat_in_dim(const xla_op, const xla_op *, size_t, int64_t);
xla_op op_tuple(const xla_builder, const xla_op *, size_t);
xla_op op_get_tuple_element(const xla_op, int64_t);
xla_op op_gather(const xla_op, const xla_op, const int64_t *, size_t,
const int64_t *, size_t, const int64_t *, size_t,
const int64_t *, const int64_t *, size_t);
xla_op op_scatter(size_t, const xla_op *, const xla_op, size_t, const xla_op *, const xla_computation,
size_t, const int64_t *, size_t, const int64_t *, size_t, const int64_t *, int64_t);
xla_op op_convert_element_type(const xla_op, int);
xla_op op_dimensions_size(const xla_op, int64_t);
xla_op op_reduce(const xla_op, const xla_op, const xla_computation,
const int64_t *, size_t);
xla_op op_internal_error(const xla_builder, const char *);
xla_op op_unknown_error(const xla_builder, const char *);
xla_op op_invalid_argument_error(const xla_builder, const char *);
xla_op op_iota1(const xla_builder, int, size_t);
xla_op op_iota(const xla_builder, int, size_t, const int64_t *, int64_t);
xla_op op_while(const xla_computation, const xla_computation, const xla_op);
xla_op op_conditional(const xla_op, const xla_op, const xla_computation,
const xla_op, const xla_computation);
xla_op op_conv(const xla_op, const xla_op, size_t, const int64_t *, const char*, int64_t, int64_t);
xla_op op_conv_general_dilated(const xla_op, const xla_op,
size_t, const int64_t *,
size_t, const int64_t *,
size_t, const int64_t *,
size_t, const int64_t *,
const int64_t *,
const int64_t *,
size_t, const int64_t *,
const int64_t *,
const int64_t *,
size_t, const int64_t *,
const int64_t *,
const int64_t *,
size_t, const int64_t *,
int64_t, int64_t);
xla_op op_batch_norm_inference(const xla_op,
const xla_op,
const xla_op,
const xla_op,
const xla_op,
float,
int64_t);
xla_builder op_builder(const xla_op);
int xla_op_valid(const xla_op);
void xla_op_free(xla_op);
int shape_dimensions_size(const shape);
size_t shape_tuple_shapes_size(const shape);
shape shape_tuple_shapes(const shape, int);
int shape_element_type(const shape);
int64_t shape_dimensions(const shape, int);
void shape_free(shape);
shape make_shape_array(int, size_t, const int64_t *);
shape make_shape_tuple(size_t, const shape *);
status get_shape(const xla_builder, const xla_op, shape *);
status get_element_type(const xla_builder, const xla_op, int *);
status get_dimensions_size(const xla_builder, const xla_op, int *);
status get_dimensions(const xla_builder, const xla_op, size_t *);
status build(const xla_builder, const xla_op, xla_computation *);
status compile(const pjrt_client, const xla_computation,
pjrt_loaded_executable *);
status execute(const pjrt_loaded_executable, const literal *, int,
pjrt_buffer ***);
status execute_b(const pjrt_loaded_executable, const pjrt_buffer *, int,
pjrt_buffer ***);
status first_error(const xla_builder);
status get_current_status(const xla_builder);
literal literal_create_from_shape(int, const int64_t *, size_t);
literal literal_create_from_shape_and_data(int, const int64_t *, size_t,
const void *, size_t);
literal literal_clone(const literal);
status literal_reshape(const literal, const int64_t *, size_t, literal *);
status literal_convert(const literal, int, literal *);
int64_t literal_element_count(const literal);
int literal_element_type(const literal);
void literal_shape(const literal, shape *);
void literal_decompose_tuple(literal, literal *, size_t);
int64_t literal_size_bytes(const literal);
void literal_copy_to(const literal, void *, size_t);
void literal_copy_from(literal, const void *, size_t);
literal literal_make_tuple(const literal *, size_t);
literal literal_make_tuple_owned(const literal *, size_t);
void literal_free(literal);
status hlo_module_proto_parse_and_return_unverified_module(const char *, size_t,
hlo_module_proto *);
status hlo_module_proto_parse_proto(const char *, size_t, bool,
hlo_module_proto *);
status hlo_module_from_proto(const hlo_module_proto, hlo_module *);
hlo_computation hlo_module_entry_computation(const hlo_module);
int64_t hlo_module_computation_count(const hlo_module);
int64_t hlo_module_instruction_count(const hlo_module);
char *hlo_module_to_string(const hlo_module);
xla_computation xla_computation_from_hlo_module_proto(const hlo_module_proto);
void hlo_module_proto_free(hlo_module_proto);
char *xla_computation_name(xla_computation);
hlo_module_proto xla_computation_proto(const xla_computation);
void xla_computation_free(xla_computation);
void status_free(status);
char *status_error_message(status);
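// X-macro over the supported native scalar types; CONST_OP_R01 expands to the
// rank-0/rank-1 constant builders and literal helpers for each of them.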
#define FOR_EACH_NATIVE_TYPE(_) \
_(bool, PRED) \
_(int8_t, S8) \
_(int16_t, S16) \
_(int32_t, S32) \
_(int64_t, S64) \
_(uint8_t, U8) \
_(uint16_t, U16) \
_(uint32_t, U32) \
_(uint64_t, U64) \
_(float, F32) \
_(double, F64)
#define CONST_OP_R01(native_type, primitive_type) \
xla_op constant_r0_##native_type(const xla_builder, native_type); \
xla_op constant_r1c_##native_type(const xla_builder, native_type, size_t); \
xla_op constant_r1_##native_type(const xla_builder, const native_type *, \
size_t); \
literal create_r0_##native_type(native_type); \
literal create_r1_##native_type(const native_type *, size_t); \
native_type literal_get_first_element_##native_type(const literal);
FOR_EACH_NATIVE_TYPE(CONST_OP_R01)
#undef CONST_OP_R01
#ifdef __cplusplus
}
#endif
| ivy/ivy/engines/XLA/rust_api/xla_rs/xla_rs.h/0 | {
"file_path": "ivy/ivy/engines/XLA/rust_api/xla_rs/xla_rs.h",
"repo_id": "ivy",
"token_count": 7623
} | 14 |
# global
from typing import Optional, Tuple
import math
import jax
import jax.numpy as jnp
import jaxlib.xla_extension
# local
from ivy.functional.backends.jax import JaxArray
import ivy
# Array API Standard #
# ------------------ #
def vorbis_window(
window_length: JaxArray,
*,
dtype: jnp.dtype = jnp.float32,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.array(
[
round(
math.sin(
(ivy.pi / 2) * (math.sin(ivy.pi * (i) / (window_length * 2)) ** 2)
),
8,
)
for i in range(1, window_length * 2)[0::2]
],
dtype=dtype,
)
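# Illustrative values (hand-derived): only the odd half-sample points are
# kept, so the window has exactly `window_length` samples and peaks at 1.0 in
# the middle, e.g. vorbis_window(3) -> [sin(pi/8), 1.0, sin(pi/8)]
# ~= [0.3827, 1.0, 0.3827]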
def hann_window(
size: int,
/,
*,
periodic: bool = True,
dtype: Optional[jnp.dtype] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if size < 2:
return jnp.ones([size], dtype=dtype)
if periodic:
count = jnp.arange(size) / size
else:
count = jnp.linspace(start=0, stop=size, num=size)
return (0.5 - 0.5 * jnp.cos(2 * jnp.pi * count)).astype(dtype)
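# Illustrative values (hand-checked):
# hann_window(4)                 -> [0. , 0.5 , 1. , 0.5 ]   # periodic
# hann_window(4, periodic=False) -> [0. , 0.75, 0.75, 0.  ]  # symmetric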
def kaiser_window(
window_length: int,
periodic: bool = True,
beta: float = 12.0,
*,
dtype: Optional[jnp.dtype] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if window_length < 2:
return jnp.ones([window_length], dtype=dtype)
if periodic is False:
return jnp.kaiser(M=window_length, beta=beta).astype(dtype)
else:
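        # periodic: build a symmetric window one sample longer and drop the
        # last sample (the standard DFT-window construction)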
return jnp.kaiser(M=window_length + 1, beta=beta)[:-1].astype(dtype)
def tril_indices(
n_rows: int,
n_cols: Optional[int] = None,
k: int = 0,
/,
*,
device: jaxlib.xla_extension.Device = None,
) -> Tuple[JaxArray, ...]:
return jnp.tril_indices(n=n_rows, k=k, m=n_cols)
def unsorted_segment_min(
data: JaxArray,
segment_ids: JaxArray,
num_segments: int,
) -> JaxArray:
# added this check to keep the same behaviour as tensorflow
ivy.utils.assertions.check_unsorted_segment_valid_params(
data, segment_ids, num_segments
)
return jax.ops.segment_min(data, segment_ids, num_segments)
def unsorted_segment_sum(
data: JaxArray,
segment_ids: JaxArray,
num_segments: int,
) -> JaxArray:
    # Reuse the validation from unsorted_segment_min; the requirements on
    # data, segment_ids and num_segments are identical.
ivy.utils.assertions.check_unsorted_segment_valid_params(
data, segment_ids, num_segments
)
return jax.ops.segment_sum(data, segment_ids, num_segments)
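# Illustrative example (hand-checked):
#   data        = jnp.array([1., 2., 3., 4.])
#   segment_ids = jnp.array([0, 0, 1, 1])
#   unsorted_segment_sum(data, segment_ids, 2) -> [3., 7.]
#   unsorted_segment_min(data, segment_ids, 2) -> [1., 3.]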
def blackman_window(
size: int,
/,
*,
periodic: bool = True,
dtype: Optional[jnp.dtype] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if size < 2:
return jnp.ones([size], dtype=dtype)
if periodic:
count = jnp.arange(size) / size
else:
count = jnp.linspace(start=0, stop=size, num=size)
return (
(0.42 - 0.5 * jnp.cos(2 * jnp.pi * count))
+ (0.08 * jnp.cos(2 * jnp.pi * 2 * count))
).astype(dtype)
def trilu(
x: JaxArray, /, *, k: int = 0, upper: bool = True, out: Optional[JaxArray] = None
) -> JaxArray:
if upper:
return jnp.triu(x, k)
return jnp.tril(x, k)
def mel_weight_matrix(
num_mel_bins: int,
dft_length: int,
sample_rate: int,
lower_edge_hertz: float = 0.0,
upper_edge_hertz: float = 3000.0,
):
lower_edge_hertz = jnp.array(lower_edge_hertz)
upper_edge_hertz = jnp.array(upper_edge_hertz)
zero = jnp.array(0.0)
def hz_to_mel(f):
return 2595 * jnp.log10(1 + f / 700)
nyquist_hz = sample_rate / 2
linear_freqs = jnp.linspace(0, nyquist_hz, dft_length, dtype=jnp.float32)[1:]
spec_bin_mels = hz_to_mel(linear_freqs)[..., None]
mel_edges = jnp.linspace(
hz_to_mel(lower_edge_hertz),
hz_to_mel(upper_edge_hertz),
num_mel_bins + 2,
dtype=jnp.float32,
)
mel_edges = jnp.stack([mel_edges[i : i + 3] for i in range(num_mel_bins)])
lower_edge_mel, center_mel, upper_edge_mel = (
t.reshape((1, num_mel_bins)) for t in jnp.split(mel_edges, 3, axis=1)
)
lower_slopes = (spec_bin_mels - lower_edge_mel) / (center_mel - lower_edge_mel)
upper_slopes = (upper_edge_mel - spec_bin_mels) / (upper_edge_mel - center_mel)
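    # Each mel bin is a triangular filter: the pointwise minimum of the rising
    # and falling slopes, clamped at zero outside the band; the final pad
    # restores a zero row for the dropped DC bin.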
mel_weights = jnp.maximum(zero, jnp.minimum(lower_slopes, upper_slopes))
return jnp.pad(mel_weights, [[1, 0], [0, 0]])
def unsorted_segment_mean(
data: JaxArray,
segment_ids: JaxArray,
num_segments: int,
) -> JaxArray:
ivy.utils.assertions.check_unsorted_segment_valid_params(
data, segment_ids, num_segments
)
segment_sum = jax.ops.segment_sum(data, segment_ids, num_segments)
segment_count = jax.ops.segment_sum(jnp.ones_like(data), segment_ids, num_segments)
segment_mean = segment_sum / segment_count
return segment_mean
def polyval(
coeffs: JaxArray,
x: JaxArray,
) -> JaxArray:
with ivy.PreciseMode(True):
promoted_type = ivy.promote_types(ivy.dtype(coeffs[0]), ivy.dtype(x[0]))
coeffs, x = ivy.promote_types_of_inputs(coeffs, x)
y = jnp.zeros_like(x)
for pv in coeffs:
y = y * x + pv
y = jnp.array(y, dtype=jnp.dtype(promoted_type))
return y
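# Horner's rule, illustrative: coeffs [1., 2., 3.] encodes x**2 + 2*x + 3,
# so polyval(jnp.array([1., 2., 3.]), jnp.array([2.])) -> [11.]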
| ivy/ivy/functional/backends/jax/experimental/creation.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/experimental/creation.py",
"repo_id": "ivy",
"token_count": 2518
} | 15 |
import jax.numpy as jnp
from typing import Optional, Union, Tuple, Sequence
from ivy.functional.backends.jax import JaxArray
import jax.lax as jlax
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from . import backend_version
from ..statistical import _infer_dtype
@with_unsupported_dtypes(
{"0.4.24 and below": ("bfloat16",)},
backend_version,
)
def histogram(
a: jnp.ndarray,
/,
*,
bins: Optional[Union[int, jnp.ndarray]] = None,
axis: Optional[int] = None,
extend_lower_interval: Optional[bool] = False,
extend_upper_interval: Optional[bool] = False,
dtype: Optional[jnp.dtype] = None,
range: Optional[Tuple[float]] = None,
weights: Optional[jnp.ndarray] = None,
density: Optional[bool] = False,
out: Optional[jnp.ndarray] = None,
) -> Tuple[jnp.ndarray]:
min_a = jnp.min(a)
max_a = jnp.max(a)
if isinstance(bins, jnp.ndarray) and range:
raise ivy.exceptions.IvyException(
"Must choose between specifying bins and range or bin edges directly"
)
if range:
bins = jnp.linspace(start=range[0], stop=range[1], num=bins + 1, dtype=a.dtype)
range = None
elif isinstance(bins, int):
range = (min_a, max_a)
bins = jnp.linspace(start=range[0], stop=range[1], num=bins + 1, dtype=a.dtype)
range = None
if bins.size < 2:
raise ivy.exceptions.IvyException("bins must have at least 1 bin (size > 1)")
bins_out = bins.copy()
if extend_lower_interval and min_a < bins[0]:
bins = bins.at[0].set(min_a)
if extend_upper_interval and max_a > bins[-1]:
bins = bins.at[-1].set(max_a)
if a.ndim > 0 and axis is not None:
inverted_shape_dims = list(jnp.flip(jnp.arange(a.ndim)))
if isinstance(axis, int):
axis = [axis]
shape_axes = 1
for dimension in axis:
inverted_shape_dims.remove(dimension)
inverted_shape_dims.append(dimension)
shape_axes *= a.shape[dimension]
a_along_axis_1d = (
a.transpose(inverted_shape_dims).flatten().reshape((-1, shape_axes))
)
if weights is None:
ret = []
for a_1d in a_along_axis_1d:
ret_1D = jnp.histogram(
a_1d,
bins=bins,
range=range,
)[0]
ret.append(ret_1D)
else:
weights_along_axis_1d = (
weights.transpose(inverted_shape_dims)
.flatten()
.reshape((-1, shape_axes))
)
ret = []
for a_1d, weights_1d in zip(a_along_axis_1d, weights_along_axis_1d):
ret_1D = jnp.histogram(
a_1d,
weights=weights_1d,
bins=bins,
range=range,
)[0]
ret.append(ret_1D)
out_shape = list(a.shape)
for dimension in sorted(axis, reverse=True):
del out_shape[dimension]
out_shape.insert(0, len(bins) - 1)
ret = jnp.array(ret)
ret = ret.flatten()
index = jnp.zeros(len(out_shape), dtype=int)
ret_shaped = jnp.zeros(out_shape)
dim = 0
i = 0
if list(index) == list(jnp.array(out_shape) - 1):
ret_shaped = ret_shaped.at[tuple(index)].set(ret[i])
while list(index) != list(jnp.array(out_shape) - 1):
ret_shaped = ret_shaped.at[tuple(index)].set(ret[i])
dim_full_flag = False
while index[dim] == out_shape[dim] - 1:
index = index.at[dim].set(0)
dim += 1
dim_full_flag = True
index = index.at[dim].add(1)
i += 1
if dim_full_flag:
dim = 0
if list(index) == list(jnp.array(out_shape) - 1):
ret_shaped = ret_shaped.at[tuple(index)].set(ret[i])
ret = ret_shaped
else:
ret = jnp.histogram(
a=a, bins=bins, range=range, weights=weights, density=density
)[0]
if dtype:
ret = ret.astype(dtype)
bins_out = jnp.array(bins_out).astype(dtype)
# TODO: weird error when returning bins: return ret, bins_out
return ret
@with_unsupported_dtypes(
{"0.4.24 and below": ("complex64", "complex128")}, backend_version
)
def median(
input: JaxArray,
/,
*,
axis: Optional[Union[Tuple[int], int]] = None,
keepdims: bool = False,
out: Optional[JaxArray] = None,
) -> JaxArray:
if isinstance(axis, list):
axis = tuple(axis)
ret = jnp.median(
input,
axis=axis,
keepdims=keepdims,
out=out,
)
if input.dtype in [jnp.uint64, jnp.int64, jnp.float64]:
return ret.astype(jnp.float64)
elif input.dtype in [jnp.float16, jnp.bfloat16]:
return ret.astype(input.dtype)
else:
return ret.astype(jnp.float32)
# Jax doesn't support overwrite_input=True and out!=None
def nanmean(
a: JaxArray,
/,
*,
axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: bool = False,
dtype: Optional[jnp.dtype] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if isinstance(axis, list):
axis = tuple(axis)
return jnp.nanmean(a, axis=axis, keepdims=keepdims, dtype=dtype, out=out)
def nanmin(
x: JaxArray,
/,
*,
axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: Optional[bool] = False,
initial: Optional[Union[int, float, complex]] = None,
where: Optional[JaxArray] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if isinstance(axis, list):
axis = tuple(axis)
return jnp.nanmin(
x, axis=axis, keepdims=keepdims, initial=initial, where=where, out=out
)
def nanprod(
a: JaxArray,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
dtype: Optional[jnp.dtype] = None,
keepdims: Optional[bool] = False,
out: Optional[JaxArray] = None,
initial: Optional[Union[int, float, complex]] = None,
where: Optional[JaxArray] = None,
) -> JaxArray:
dtype = ivy.as_native_dtype(dtype)
if dtype is None:
dtype = _infer_dtype(a.dtype)
axis = tuple(axis) if isinstance(axis, list) else axis
return jnp.nanprod(
a, axis=axis, keepdims=keepdims, dtype=dtype, out=out, initial=initial
)
def quantile(
a: JaxArray,
q: Union[float, JaxArray],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
interpolation: str = "linear",
keepdims: bool = False,
out: Optional[JaxArray] = None,
) -> JaxArray:
axis = tuple(axis) if isinstance(axis, list) else axis
interpolation = "nearest" if interpolation == "nearest_jax" else interpolation
return jnp.quantile(
a, q, axis=axis, method=interpolation, keepdims=keepdims, out=out
)
def corrcoef(
x: JaxArray,
/,
*,
y: Optional[JaxArray] = None,
rowvar: bool = True,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.corrcoef(x, y=y, rowvar=rowvar)
def nanmedian(
input: JaxArray,
/,
*,
axis: Optional[Union[Tuple[int], int]] = None,
keepdims: bool = False,
overwrite_input: bool = False,
out: Optional[JaxArray] = None,
) -> JaxArray:
if isinstance(axis, list):
axis = tuple(axis)
if overwrite_input:
copied_input = input.copy()
overwrite_input = False
out = None
return jnp.nanmedian(
copied_input,
axis=axis,
keepdims=keepdims,
overwrite_input=overwrite_input,
out=out,
)
return jnp.nanmedian(
input, axis=axis, keepdims=keepdims, overwrite_input=False, out=None
)
def bincount(
x: JaxArray,
/,
*,
weights: Optional[JaxArray] = None,
minlength: int = 0,
out: Optional[JaxArray] = None,
) -> JaxArray:
if weights is not None:
ret = jnp.bincount(x, weights=weights, minlength=minlength)
ret = ret.astype(weights.dtype)
else:
ret = jnp.bincount(x, minlength=minlength).astype(x.dtype)
return ret
def cov(
x1: JaxArray,
x2: JaxArray = None,
/,
*,
rowVar: bool = True,
bias: bool = False,
ddof: Optional[int] = None,
fweights: Optional[JaxArray] = None,
aweights: Optional[JaxArray] = None,
dtype: Optional[jnp.dtype] = None,
) -> JaxArray:
if not dtype:
x1 = jnp.asarray(x1, dtype=jnp.float64)
if jnp.ndim(x1) > 2:
raise ValueError("x1 has more than 2 dimensions")
if x2 is not None:
if jnp.ndim(x2) > 2:
raise ValueError("x2 has more than 2 dimensions")
if fweights is not None:
fweights = jnp.asarray(fweights, dtype=jnp.int64)
return jnp.cov(
m=x1,
y=x2,
rowvar=rowVar,
bias=bias,
ddof=ddof,
fweights=fweights,
aweights=aweights,
)
@with_unsupported_dtypes({"0.4.14 and below": ("bool",)}, backend_version)
def cummax(
x: JaxArray,
/,
*,
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
dtype: Optional[jnp.dtype] = None,
out: Optional[JaxArray] = None,
) -> Tuple[JaxArray, JaxArray]:
if x.dtype in (jnp.complex128, jnp.complex64):
x = x.real
    if exclusive:
if exclusive and reverse:
indices = __find_cummax_indices(jnp.flip(x, axis=axis), axis=axis)
x = jlax.cummax(jnp.flip(x, axis=axis), axis=axis)
x, indices = jnp.swapaxes(x, axis, -1), jnp.swapaxes(indices, axis, -1)
x, indices = jnp.concatenate(
(jnp.zeros_like(x[..., -1:]), x[..., :-1]), -1
), jnp.concatenate(
(jnp.zeros_like(indices[..., -1:]), indices[..., :-1]), -1
)
x, indices = jnp.swapaxes(x, axis, -1), jnp.swapaxes(indices, axis, -1)
res, indices = jnp.flip(x, axis=axis), jnp.flip(indices, axis=axis)
elif exclusive:
x = jnp.swapaxes(x, axis, -1)
x = jnp.concatenate((jnp.zeros_like(x[..., -1:]), x[..., :-1]), -1)
x = jnp.swapaxes(x, axis, -1)
indices = __find_cummax_indices(x, axis=axis)
res = jlax.cummax(x, axis=axis)
return res, indices
if reverse:
y = jnp.flip(x, axis=axis)
indices = __find_cummax_indices(y, axis=axis)
indices = jnp.flip(indices, axis=axis)
else:
indices = __find_cummax_indices(x, axis=axis)
return jlax.cummax(x, axis, reverse=reverse), indices
def __find_cummax_indices(
x: JaxArray,
axis: int = 0,
) -> JaxArray:
n, indice, indices = 0, [], []
if isinstance(x[0], JaxArray) and len(x[0].shape) >= 1:
if axis >= 1:
for ret1 in x:
indice = __find_cummax_indices(ret1, axis=axis - 1)
indices.append(indice)
else:
z_list = __get_index(x.tolist())
indices, n1 = x.copy(), {}
indices = jnp.zeros(jnp.asarray(indices.shape), dtype=x.dtype)
z_list = sorted(z_list, key=lambda i: i[1])
for y, y_index in z_list:
multi_index = y_index
if tuple(multi_index[1:]) not in n1:
n1[tuple(multi_index[1:])] = multi_index[0]
indices = indices.at[y_index].set(multi_index[0])
elif (
y >= x[tuple([n1[tuple(multi_index[1:])]] + list(multi_index[1:]))]
):
n1[tuple(multi_index[1:])] = multi_index[0]
indices = indices.at[y_index].set(multi_index[0])
else:
indices = indices.at[y_index].set(n1[tuple(multi_index[1:])])
else:
n, indices = 0, []
for idx, y in enumerate(x):
if idx == 0 or x[n] <= y:
n = idx
indices.append(n)
return jnp.asarray(indices, dtype="int64")
def __get_index(lst, indices=None, prefix=None):
if indices is None:
indices = []
if prefix is None:
prefix = []
if isinstance(lst, list):
for i, sub_lst in enumerate(lst):
sub_indices = prefix + [i]
__get_index(sub_lst, indices, sub_indices)
else:
indices.append((lst, tuple(prefix)))
return indices
@with_unsupported_dtypes(
{
"0.4.24 and below": (
"bfloat16",
"bool",
)
},
backend_version,
)
def cummin(
x: JaxArray,
/,
*,
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
dtype: Optional[jnp.dtype] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if axis < 0:
axis = axis + len(x.shape)
dtype = ivy.as_native_dtype(dtype)
if dtype is None:
dtype = _infer_dtype(x.dtype)
return jlax.cummin(x, axis, reverse=reverse).astype(dtype)
def igamma(
a: JaxArray,
/,
*,
x: JaxArray,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jlax.igamma(a=a, x=x)
| ivy/ivy/functional/backends/jax/experimental/statistical.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/experimental/statistical.py",
"repo_id": "ivy",
"token_count": 6665
} | 16 |
"""MXNet activation functions.
Collection of MXNet activation functions, wrapped to fit Ivy syntax and
signature.
"""
import mxnet as mx
import numpy as np
from ivy.utils.exceptions import IvyNotImplementedException
from typing import Optional, Union
def gelu(
x: None,
/,
*,
approximate: bool = False,
complex_mode="jax",
out: Optional[None] = None,
) -> None:
if approximate:
return 0.5 * x * (1 + mx.nd.tanh(((2 / np.pi) ** 0.5) * (x + 0.044715 * x**3)))
return mx.nd.LeakyReLU(x, act_type="gelu")
def leaky_relu(
x: None, /, *, alpha: float = 0.2, complex_mode="jax", out: Optional[None] = None
) -> None:
return mx.nd.LeakyReLU(x, slope=alpha)
def relu(x: None, /, *, complex_mode="jax", out: Optional[None] = None) -> None:
return mx.nd.relu(x)
def sigmoid(x: None, /, *, out: Optional[None] = None) -> None:
return mx.nd.sigmoid(x)
def softmax(
x: None, /, *, axis: Optional[int] = None, out: Optional[None] = None
) -> None:
return mx.nd.softmax(x, axis=axis)
def softplus(
x: Union[(int, float, mx.nd.NDArray)],
/,
*,
beta: Optional[Union[(int, float)]] = None,
threshold: Optional[Union[(int, float)]] = None,
complex_mode="jax",
out: Optional[None] = None,
) -> None:
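    # Numerically stable softplus: log(1 + exp(x)) == log1p(exp(-|x|)) + max(x, 0),
    # which avoids overflow for large positive inputs.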
if beta is not None and beta != 1:
x_beta = x * beta
res = (
mx.nd.add(
mx.nd.log1p(mx.nd.exp(-mx.nd.abs(x_beta))),
mx.nd.maximum(x_beta, 0),
)
) / beta
else:
x_beta = x
res = mx.nd.add(
mx.nd.log1p(mx.nd.exp(-mx.nd.abs(x_beta))), mx.nd.maximum(x_beta, 0)
)
if threshold is not None:
return mx.nd.where(x_beta > threshold, x, res).astype(x.dtype)
return res.astype(x.dtype)
# Softsign
def softsign(x: None, /, *, out: Optional[None] = None) -> None:
return mx.nd.softsign(x)
def log_softmax(x: None, /, *, axis: Optional[int] = -1, out: Optional[None] = None):
raise IvyNotImplementedException()
def mish(x: None, /, *, out: Optional[None] = None) -> None:
raise IvyNotImplementedException()
| ivy/ivy/functional/backends/mxnet/activations.py/0 | {
"file_path": "ivy/ivy/functional/backends/mxnet/activations.py",
"repo_id": "ivy",
"token_count": 980
} | 17 |
from typing import Union, Optional, Sequence, Tuple, List
from numbers import Number
import mxnet as mx
from ivy.utils.exceptions import IvyNotImplementedException
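# MXNet manipulation stubs: every function below currently raises
# IvyNotImplementedException until a native implementation is added.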
def moveaxis(
a: Union[(None, mx.ndarray.NDArray)],
source: Union[(int, Sequence[int])],
destination: Union[(int, Sequence[int])],
/,
*,
copy: Optional[bool] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def heaviside(
x1: Union[(None, mx.ndarray.NDArray)],
x2: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def flipud(
m: Union[(None, mx.ndarray.NDArray)],
/,
*,
copy: Optional[bool] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def vstack(
arrays: Union[(Sequence[None], Sequence[mx.ndarray.NDArray])],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def hstack(
arrays: Union[(Sequence[None], Sequence[mx.ndarray.NDArray])],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def rot90(
m: Union[(None, mx.ndarray.NDArray)],
/,
*,
copy: Optional[bool] = None,
k: int = 1,
axes: Tuple[(int, int)] = (0, 1),
out: Union[(None, mx.ndarray.NDArray)] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def top_k(
x: None,
k: int,
/,
*,
axis: int = -1,
largest: bool = True,
sorted: bool = True,
out: Optional[Tuple[(None, None)]] = None,
) -> Tuple[(None, None)]:
raise IvyNotImplementedException()
def fliplr(
m: Union[(None, mx.ndarray.NDArray)],
/,
*,
copy: Optional[bool] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def i0(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def vsplit(
ary: Union[(None, mx.ndarray.NDArray)],
indices_or_sections: Union[(int, Tuple[(int, ...)])],
/,
*,
copy: Optional[bool] = None,
) -> List[Union[(None, mx.ndarray.NDArray)]]:
raise IvyNotImplementedException()
def dsplit(
ary: Union[(None, mx.ndarray.NDArray)],
indices_or_sections: Union[(int, Tuple[(int, ...)])],
/,
*,
copy: Optional[bool] = None,
) -> List[Union[(None, mx.ndarray.NDArray)]]:
raise IvyNotImplementedException()
def atleast_1d(
*arys: Union[(None, mx.ndarray.NDArray, bool, Number)], copy: Optional[bool] = None
) -> List[Union[(None, mx.ndarray.NDArray)]]:
raise IvyNotImplementedException()
def dstack(
arrays: Union[(Sequence[None], Sequence[mx.ndarray.NDArray])],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def atleast_2d(
*arys: Union[(None, mx.ndarray.NDArray)], copy: Optional[bool] = None
) -> List[Union[(None, mx.ndarray.NDArray)]]:
raise IvyNotImplementedException()
def atleast_3d(
*arys: Union[(None, mx.ndarray.NDArray, bool, Number)], copy: Optional[bool] = None
) -> List[Union[(None, mx.ndarray.NDArray)]]:
raise IvyNotImplementedException()
def take(
x: Union[int, List, Union[(None, mx.ndarray.NDArray)]],
indices: Union[int, List, Union[(None, mx.ndarray.NDArray)]],
/,
*,
axis: Optional[int] = None,
mode: str = "clip",
fill_value: Optional[Number] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def take_along_axis(
arr: Union[(None, mx.ndarray.NDArray)],
indices: Union[(None, mx.ndarray.NDArray)],
axis: int,
/,
*,
mode: str = "fill",
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def hsplit(
ary: Union[(None, mx.ndarray.NDArray)],
indices_or_sections: Union[(int, Tuple[(int, ...)])],
/,
*,
copy: Optional[bool] = None,
) -> List[Union[(None, mx.ndarray.NDArray)]]:
raise IvyNotImplementedException()
def broadcast_shapes(*shapes: Union[(List[int], List[Tuple])]) -> Tuple[(int, ...)]:
raise IvyNotImplementedException()
def expand(
x: Union[(None, mx.ndarray.NDArray)],
shape: Union[(List[int], List[Tuple])],
/,
*,
copy: Optional[bool] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def concat_from_sequence(
input_sequence: Union[(Tuple[None], List[None])],
/,
*,
new_axis: int = 0,
axis: int = 0,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
| ivy/ivy/functional/backends/mxnet/experimental/manipulation.py/0 | {
"file_path": "ivy/ivy/functional/backends/mxnet/experimental/manipulation.py",
"repo_id": "ivy",
"token_count": 2312
} | 18 |
import mxnet as mx
from numbers import Number
from typing import Optional, Union, Tuple
import numpy as np
import ivy
from ivy.utils.exceptions import IvyNotImplementedException
def argmax(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: Optional[int] = None,
keepdims: bool = False,
dtype: Optional[Union[(ivy.Dtype, ivy.NativeDtype)]] = None,
select_last_index: bool = False,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def argmin(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: Optional[int] = None,
keepdims: bool = False,
dtype: Optional[Union[np.dtype, str]] = None,
select_last_index: bool = False,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def nonzero(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
as_tuple: bool = True,
size: Optional[int] = None,
fill_value: Number = 0,
) -> Union[(None, mx.ndarray.NDArray, Tuple[Union[(None, mx.ndarray.NDArray)]])]:
raise IvyNotImplementedException()
def where(
condition: Union[(None, mx.ndarray.NDArray)],
x1: Union[(None, mx.ndarray.NDArray)],
x2: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def argwhere(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
| ivy/ivy/functional/backends/mxnet/searching.py/0 | {
"file_path": "ivy/ivy/functional/backends/mxnet/searching.py",
"repo_id": "ivy",
"token_count": 727
} | 19 |
from typing import Optional, Union, Tuple, Sequence
import numpy as np
import math
import ivy # noqa
from ivy.func_wrapper import with_unsupported_dtypes
from . import backend_version
from ..statistical import _infer_dtype
from copy import deepcopy
@with_unsupported_dtypes(
{"1.26.3 and below": ("bfloat16",)},
backend_version,
)
def histogram(
a: np.ndarray,
/,
*,
bins: Optional[Union[int, np.ndarray]] = None,
axis: Optional[int] = None,
extend_lower_interval: Optional[bool] = False,
extend_upper_interval: Optional[bool] = False,
dtype: Optional[np.dtype] = None,
range: Optional[Tuple[float]] = None,
weights: Optional[np.ndarray] = None,
density: Optional[bool] = False,
out: Optional[np.ndarray] = None,
) -> Tuple[np.ndarray]:
min_a = np.min(a)
max_a = np.max(a)
if isinstance(bins, np.ndarray) and range:
raise ivy.exceptions.IvyException(
"Must choose between specifying bins and range or bin edges directly"
)
if range:
bins = np.linspace(start=range[0], stop=range[1], num=bins + 1, dtype=a.dtype)
range = None
elif isinstance(bins, int):
range = (min_a, max_a)
bins = np.linspace(start=range[0], stop=range[1], num=bins + 1, dtype=a.dtype)
range = None
if bins.size < 2:
raise ivy.exceptions.IvyException("bins must have at least 1 bin (size > 1)")
bins_out = bins.copy()
if extend_lower_interval and min_a < bins[0]:
bins[0] = min_a
if extend_upper_interval and max_a > bins[-1]:
bins[-1] = max_a
if a.ndim > 0 and axis is not None:
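        # Move the histogrammed axes to the end and flatten them, so that each
        # row of a_along_axis_1d is an independent 1-D histogram input.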
inverted_shape_dims = list(np.flip(np.arange(a.ndim)))
if isinstance(axis, int):
axis = [axis]
shape_axes = 1
for dimension in axis:
inverted_shape_dims.remove(dimension)
inverted_shape_dims.append(dimension)
shape_axes *= a.shape[dimension]
a_along_axis_1d = (
a.transpose(inverted_shape_dims).flatten().reshape((-1, shape_axes))
)
if weights is None:
ret = []
for a_1d in a_along_axis_1d:
ret_1d = np.histogram(
a_1d,
bins=bins,
range=range,
                    # TODO: forward `density` once this per-axis path supports it
# density=density,
)[0]
ret.append(ret_1d)
else:
weights_along_axis_1d = (
weights.transpose(inverted_shape_dims)
.flatten()
.reshape((-1, shape_axes))
)
ret = []
for a_1d, weights_1d in zip(a_along_axis_1d, weights_along_axis_1d):
ret_1d = np.histogram(
a_1d,
weights=weights_1d,
bins=bins,
range=range,
                    # TODO: forward `density` once this per-axis path supports it
# density=density,
)[0]
ret.append(ret_1d)
out_shape = list(a.shape)
for dimension in sorted(axis, reverse=True):
del out_shape[dimension]
out_shape.insert(0, len(bins) - 1)
ret = np.array(ret)
ret = ret.flatten()
index = np.zeros(len(out_shape), dtype=int)
ret_shaped = np.zeros(out_shape)
dim = 0
i = 0
if list(index) == list(np.array(out_shape) - 1):
ret_shaped[tuple(index)] = ret[i]
while list(index) != list(np.array(out_shape) - 1):
ret_shaped[tuple(index)] = ret[i]
dim_full_flag = False
while index[dim] == out_shape[dim] - 1:
index[dim] = 0
dim += 1
dim_full_flag = True
index[dim] += 1
i += 1
if dim_full_flag:
dim = 0
if list(index) == list(np.array(out_shape) - 1):
ret_shaped[tuple(index)] = ret[i]
ret = ret_shaped
else:
ret = np.histogram(
a=a, bins=bins, range=range, weights=weights, density=density
)[0]
if dtype:
ret = ret.astype(dtype)
bins_out = np.array(bins_out).astype(dtype)
# TODO: weird error when returning bins: return ret, bins_out
return ret
def median(
input: np.ndarray,
/,
*,
axis: Optional[Union[Tuple[int], int]] = None,
keepdims: bool = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if out is not None:
out = np.reshape(out, input.shape)
ret = np.median(
input,
axis=axis,
keepdims=keepdims,
out=out,
)
if input.dtype in [np.uint64, np.int64, np.float64]:
return ret.astype(np.float64)
elif input.dtype in [np.float16]:
return ret.astype(input.dtype)
else:
return ret.astype(np.float32)
median.support_native_out = True
def nanmean(
a: np.ndarray,
/,
*,
axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: bool = False,
dtype: Optional[np.dtype] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if isinstance(axis, list):
axis = tuple(axis)
return np.nanmean(a, axis=axis, keepdims=keepdims, dtype=dtype, out=out)
nanmean.support_native_out = True
def nanmin(
a: np.ndarray,
/,
*,
axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: Optional[bool] = False,
initial: Optional[Union[int, float, complex]] = None,
where: Optional[np.ndarray] = True,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
axis = tuple(axis) if isinstance(axis, list) else axis
if where is None:
where = True
return np.nanmin(
a=a,
axis=axis,
keepdims=keepdims,
out=out,
initial=initial,
where=where,
)
nanmin.support_native_out = True
def nanprod(
a: np.ndarray,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
dtype: Optional[np.dtype] = None,
keepdims: Optional[bool] = False,
out: Optional[np.ndarray] = None,
initial: Optional[Union[int, float, complex]] = None,
where: Optional[np.ndarray] = None,
) -> np.ndarray:
dtype = ivy.as_native_dtype(dtype)
if dtype is None:
dtype = _infer_dtype(a.dtype)
axis = tuple(axis) if isinstance(axis, list) else axis
return np.asarray(
np.nanprod(
a=a, axis=axis, dtype=dtype, keepdims=keepdims, out=out, initial=initial
)
)
nanprod.support_native_out = True
def _validate_quantile(q):
if isinstance(q, float):
q = np.asarray(q)
if q.ndim == 1 and q.size < 10:
for i in range(q.size):
if not (0.0 <= q[i] <= 1.0):
return False
else:
if not (np.all(q >= 0) and np.all(q <= 1)):
return False
return True
def _to_positive_axis(axis, ndim):
if not isinstance(axis, (list, tuple)):
axis = [axis]
if len(axis) == 0:
raise ValueError("Axis can't be empty!")
if len(set(axis)) != len(axis):
raise ValueError("Duplicated axis!")
for i in range(len(axis)):
if not (isinstance(axis[i], int) and (ndim > axis[i] >= -ndim)):
raise ValueError("Axis must be int in range [-rank(x), rank(x))")
if axis[i] < 0:
axis[i] += ndim
return axis
def _handle_axis(a, q, fn, keepdims=False, axis=None):
nd = a.ndim
axis_arg = deepcopy(axis)
if axis is not None:
axis = _to_positive_axis(axis, nd)
if len(axis) == 1:
axis_arg = axis[0]
else:
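            # Several axes: move the kept axes to the front, collapse all the
            # reduced axes into one trailing axis, then reduce over axis=-1.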
keep = set(range(nd)) - set(axis)
nkeep = len(keep)
for i, s in enumerate(sorted(keep)):
a = np.moveaxis(a, s, i)
a = a.reshape(a.shape[:nkeep] + (-1,))
axis_arg = -1
ret = fn(a, q, axis=axis_arg)
if keepdims:
if axis is None:
index_ret = (None,) * nd
else:
index_ret = tuple(None if i in axis else slice(None) for i in range(nd))
ret = ret[(Ellipsis,) + index_ret]
return ret
def _quantile(a, q, axis=None):
if isinstance(q, float):
q = np.asarray(q)
ret_dtype = a.dtype
if q.ndim > 1:
raise ValueError("q argument must be a scalar or 1-dimensional!")
if axis is None:
axis = 0
a = a.flatten()
elif axis != 0:
a = np.moveaxis(a, axis, 0)
axis = 0
n = a.shape[axis]
indices = q * (n - 1)
a.sort(axis)
indices_below = np.floor(indices).astype(np.int32)
indices_upper = np.ceil(indices).astype(np.int32)
weights = indices - indices_below.astype("float64")
indices_below = np.clip(indices_below, 0, n - 1)
indices_upper = np.clip(indices_upper, 0, n - 1)
tensor_upper = np.take(a, indices_upper, axis=axis) # , mode="clip")
tensor_below = np.take(a, indices_below, axis=axis) # , mode="clip")
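    # jax-style "nearest" tie-breaking: take the lower neighbour when the
    # fractional part of the index is <= 0.5, otherwise the upper one.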
pred = weights <= 0.5
out = np.where(pred, tensor_below, tensor_upper)
return out.astype(ret_dtype)
def _compute_quantile_wrapper(
x, q, axis=None, keepdims=False, interpolation="linear", out=None
):
if not _validate_quantile(q):
raise ValueError("Quantiles must be in the range [0, 1]")
if interpolation in [
"linear",
"lower",
"higher",
"midpoint",
"nearest",
"nearest_jax",
]:
if interpolation == "nearest_jax":
return _handle_axis(x, q, _quantile, keepdims=keepdims, axis=axis)
else:
axis = tuple(axis) if isinstance(axis, list) else axis
return np.quantile(
x, q, axis=axis, method=interpolation, keepdims=keepdims, out=out
).astype(x.dtype)
else:
raise ValueError(
"Interpolation must be 'linear', 'lower', 'higher', 'midpoint' or 'nearest'"
)
def quantile(
a: np.ndarray,
q: Union[float, np.ndarray],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
interpolation: str = "linear",
out: Optional[np.ndarray] = None,
) -> np.ndarray:
    # numpy's quantile always returns float64, while other backends keep the
    # input dtype, so the wrapper casts the result back to a.dtype.
    # The "nearest_jax" mode enables jax-like behaviour for method="nearest".
return _compute_quantile_wrapper(
a,
q,
axis=axis,
keepdims=keepdims,
interpolation=interpolation,
out=out,
)
def corrcoef(
x: np.ndarray,
/,
*,
y: Optional[np.ndarray] = None,
rowvar: bool = True,
dtype: np.dtype = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
dtype = dtype if dtype is not None else np.float64
return np.corrcoef(x, y=y, rowvar=rowvar, dtype=dtype)
@with_unsupported_dtypes(
{"1.25.0 and below": ("bfloat16",)},
backend_version,
)
def nanmedian(
input: np.ndarray,
/,
*,
axis: Optional[Union[Tuple[int], int]] = None,
keepdims: bool = False,
overwrite_input: bool = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.nanmedian(
input, axis=axis, keepdims=keepdims, overwrite_input=overwrite_input, out=out
)
nanmedian.support_native_out = True
def bincount(
x: np.ndarray,
/,
*,
weights: Optional[np.ndarray] = None,
minlength: int = 0,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if weights is not None:
ret = np.bincount(x, weights=weights, minlength=minlength)
ret = ret.astype(weights.dtype)
else:
ret = np.bincount(x, minlength=minlength)
ret = ret.astype(x.dtype)
return ret
bincount.support_native_out = False
def cov(
x1: np.ndarray,
x2: np.ndarray = None,
/,
*,
rowVar: bool = True,
bias: bool = False,
ddof: Optional[int] = None,
fweights: Optional[np.ndarray] = None,
aweights: Optional[np.ndarray] = None,
dtype: Optional[np.dtype] = None,
) -> np.ndarray:
return np.cov(
m=x1,
y=x2,
rowvar=rowVar,
bias=bias,
ddof=ddof,
fweights=fweights,
aweights=aweights,
dtype=dtype,
)
cov.support_native_out = False
def cummax(
x: np.ndarray,
/,
*,
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
dtype: Optional[np.dtype] = None,
out: Optional[np.ndarray] = None,
) -> Tuple[np.ndarray, np.ndarray]:
if exclusive or reverse:
if exclusive and reverse:
indices = __find_cummax_indices(np.flip(x, axis=axis), axis=axis)
x = np.maximum.accumulate(np.flip(x, axis=axis), axis=axis, dtype=x.dtype)
x = np.swapaxes(x, axis, -1)
indices = np.swapaxes(indices, axis, -1)
x, indices = np.concatenate(
(np.zeros_like(x[..., -1:]), x[..., :-1]), -1
), np.concatenate((np.zeros_like(indices[..., -1:]), indices[..., :-1]), -1)
x, indices = np.swapaxes(x, axis, -1), np.swapaxes(indices, axis, -1)
res, indices = np.flip(x, axis=axis), np.flip(indices, axis=axis)
elif exclusive:
x = np.swapaxes(x, axis, -1)
x = np.concatenate((np.zeros_like(x[..., -1:]), x[..., :-1]), -1)
x = np.swapaxes(x, axis, -1)
indices = __find_cummax_indices(x, axis=axis)
res = np.maximum.accumulate(x, axis=axis, dtype=x.dtype)
elif reverse:
x = np.flip(x, axis=axis)
indices = __find_cummax_indices(x, axis=axis)
x = np.maximum.accumulate(x, axis=axis)
res, indices = np.flip(x, axis=axis), np.flip(indices, axis=axis)
return res, indices
indices = __find_cummax_indices(x, axis=axis)
return np.maximum.accumulate(x, axis=axis, dtype=x.dtype), indices
def __find_cummax_indices(
x: np.ndarray,
axis: int = 0,
) -> np.ndarray:
indices = []
    if isinstance(x[0], np.ndarray):
if axis >= 1:
for ret1 in x:
indice = __find_cummax_indices(ret1, axis=axis - 1)
indices.append(indice)
else:
indice_list = __get_index(x.tolist())
indices, n1 = x.copy(), {}
indices.fill(0)
indice_list = sorted(indice_list, key=lambda i: i[1])
for y, y_index in indice_list:
multi_index = y_index
if tuple(multi_index[1:]) not in n1:
n1[tuple(multi_index[1:])] = multi_index[0]
indices[y_index] = multi_index[0]
elif (
y >= x[tuple([n1[tuple(multi_index[1:])]] + list(multi_index[1:]))]
):
n1[tuple(multi_index[1:])] = multi_index[0]
indices[y_index] = multi_index[0]
else:
indices[y_index] = n1[tuple(multi_index[1:])]
else:
n = 0
for index1, ret1 in enumerate(x):
if x[n] <= ret1 or index1 == 0:
n = index1
indices.append(n)
return np.array(indices, dtype=np.int64)
def __get_index(lst, indices=None, prefix=None):
if indices is None:
indices = []
if prefix is None:
prefix = []
if isinstance(lst, list):
for i, sub_lst in enumerate(lst):
sub_indices = prefix + [i]
__get_index(sub_lst, indices, sub_indices)
else:
indices.append((lst, tuple(prefix)))
return indices
@with_unsupported_dtypes({"1.26.3 and below": "bfloat16"}, backend_version)
def cummin(
x: np.ndarray,
/,
*,
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
dtype: Optional[np.dtype] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if dtype is None:
dtype = _infer_dtype(x.dtype)
if not (reverse):
return np.minimum.accumulate(x, axis, dtype=dtype, out=out)
elif reverse:
x = np.minimum.accumulate(np.flip(x, axis=axis), axis=axis, dtype=dtype)
return np.flip(x, axis=axis)
def igamma(
a: np.ndarray,
/,
*,
x: np.ndarray,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
def igamma_cal(a, x):
t = np.linspace(0, x, 10000, dtype=np.float64)
y = np.exp(-t) * (t ** (a - 1))
integral = np.trapz(y, t)
return integral / math.gamma(a)
igamma_vec = np.vectorize(igamma_cal)
return igamma_vec(a, x).astype(a.dtype)
| ivy/ivy/functional/backends/numpy/experimental/statistical.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/experimental/statistical.py",
"repo_id": "ivy",
"token_count": 8322
} | 20 |
# global
import sys
import paddle as paddle
# local
import ivy
from ivy.func_wrapper import _dtype_from_version
backend_version = {"version": paddle.version.full_version}
# noinspection PyUnresolvedReferences
if not ivy.is_local():
_module_in_memory = sys.modules[__name__]
else:
_module_in_memory = sys.modules[ivy.import_module_path].import_cache[__name__]
use = ivy.utils.backend.ContextManager(_module_in_memory)
# Wrap dunder methods of native tensors so they return NotImplemented when an
# Ivy array is involved, giving priority to the Ivy array's implementation.
def dunder_wrapper(func):
def rep_method(*args, **kwargs):
for arg in args:
if ivy.is_ivy_array(arg):
return NotImplemented
return func(*args, **kwargs)
return rep_method
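# Illustrative effect: for an ivy.Array `a` and a paddle.Tensor `t`, `t + a`
# now returns NotImplemented from paddle's __add__, so Python falls back to
# a.__radd__(t) and Ivy's implementation handles the operation.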
# check for previously imported paddle modules
modules_to_patch = []
tensors_to_patch = []
tmp_globals = dict(globals())
for name, value in tmp_globals.items():
if value == "paddle.Tensor":
tensors_to_patch.append(name)
try:
if value.__name__ == "paddle":
modules_to_patch.append(name)
except AttributeError:
pass
methods_to_patch = [
"__add__",
"__sub__",
"__mul__",
"__div__",
"__truediv__",
"__floordiv__",
"__mod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__ne__",
"__eq__",
"__and__",
"__or__",
"__xor__",
"__pow__",
"__matmul__",
]
for module in modules_to_patch:
for method in methods_to_patch:
exec(
module
+ ".Tensor."
+ method
+ " = dunder_wrapper("
+ module
+ ".Tensor."
+ method
+ ")"
)
for tensor in tensors_to_patch:
for method in methods_to_patch:
exec(tensor + "." + method + " = dunder_wrapper(" + tensor + "." + method + ")")
NativeArray = paddle.Tensor
NativeVariable = paddle.Tensor # paddle.fluid.framework.Variable
NativeDevice = paddle.device.core.Place
NativeDtype = paddle.dtype
NativeShape = list
NativeSparseArray = paddle.Tensor
# devices
valid_devices = (
"cpu",
"gpu",
)
invalid_devices = "tpu"
# native data types
native_int8 = paddle.int8
native_int16 = paddle.int16
native_int32 = paddle.int32
native_int64 = paddle.int64
native_uint8 = paddle.uint8
native_bfloat16 = paddle.bfloat16
native_float16 = paddle.float16
native_float32 = paddle.float32
native_float64 = paddle.float64
native_complex64 = paddle.complex64
native_complex128 = paddle.complex128
native_double = native_float64
native_bool = paddle.bool
# valid data types
# ToDo: Add complex dtypes to valid_dtypes and fix all resulting failures.
# update these to add new dtypes
valid_dtypes = {
"2.4.2 and below": (
ivy.int8,
ivy.int16,
ivy.int32,
ivy.int64,
ivy.uint8,
ivy.float16,
ivy.float32,
ivy.float64,
ivy.complex64,
ivy.complex128,
ivy.bool,
),
"2.5.0 and above": (
ivy.int8,
ivy.int16,
ivy.int32,
ivy.int64,
ivy.uint8,
ivy.bfloat16,
ivy.float16,
ivy.float32,
ivy.float64,
ivy.complex64,
ivy.complex128,
ivy.bool,
),
}
valid_numeric_dtypes = {
"2.4.2 and below": (
ivy.int8,
ivy.int16,
ivy.int32,
ivy.int64,
ivy.uint8,
ivy.float16,
ivy.float32,
ivy.float64,
ivy.complex64,
ivy.complex128,
ivy.bool,
),
"2.5.0 and above": (
ivy.int8,
ivy.int16,
ivy.int32,
ivy.int64,
ivy.uint8,
ivy.bfloat16,
ivy.float16,
ivy.float32,
ivy.float64,
ivy.complex64,
ivy.complex128,
ivy.bool,
),
}
valid_int_dtypes = {
"2.6.0 and below": (
ivy.int8,
ivy.int16,
ivy.int32,
ivy.int64,
ivy.uint8,
),
}
valid_float_dtypes = {
"2.4.2 and below": (ivy.float16, ivy.float32, ivy.float64),
"2.5.0 and above": (ivy.bfloat16, ivy.float16, ivy.float32, ivy.float64),
}
valid_uint_dtypes = {"2.6.0 and below": (ivy.uint8,)}
valid_complex_dtypes = {"2.6.0 and below": (ivy.complex64, ivy.complex128)}
# leave these untouched
valid_dtypes = _dtype_from_version(valid_dtypes, backend_version)
valid_numeric_dtypes = _dtype_from_version(valid_numeric_dtypes, backend_version)
valid_int_dtypes = _dtype_from_version(valid_int_dtypes, backend_version)
valid_float_dtypes = _dtype_from_version(valid_float_dtypes, backend_version)
valid_uint_dtypes = _dtype_from_version(valid_uint_dtypes, backend_version)
valid_complex_dtypes = _dtype_from_version(valid_complex_dtypes, backend_version)
# update these to add new dtypes
invalid_dtypes = {
"2.4.2 and below": (
ivy.uint16,
ivy.uint32,
ivy.uint64,
ivy.bfloat16,
),
"2.5.0 and above": (
ivy.uint16,
ivy.uint32,
ivy.uint64,
),
}
invalid_numeric_dtypes = {
"2.4.2 and below": (
ivy.uint16,
ivy.uint32,
ivy.uint64,
ivy.bfloat16,
),
"2.5.0 and above": (
ivy.uint16,
ivy.uint32,
ivy.uint64,
),
}
invalid_int_dtypes = {"2.6.0 and below": (ivy.uint16, ivy.uint32, ivy.uint64)}
invalid_float_dtypes = {"2.4.2 and below": (ivy.bfloat16,), "2.5.0 and above": ()}
invalid_uint_dtypes = {"2.6.0 and below": (ivy.uint16, ivy.uint32, ivy.uint64)}
invalid_complex_dtypes = {"2.6.0 and below": ()}
# leave these untouched
invalid_dtypes = _dtype_from_version(invalid_dtypes, backend_version)
invalid_numeric_dtypes = _dtype_from_version(invalid_numeric_dtypes, backend_version)
invalid_float_dtypes = _dtype_from_version(invalid_float_dtypes, backend_version)
invalid_uint_dtypes = _dtype_from_version(invalid_uint_dtypes, backend_version)
invalid_complex_dtypes = _dtype_from_version(invalid_complex_dtypes, backend_version)
native_inplace_support = False
supports_gradients = True
def closest_valid_dtype(type=None, /, as_native=False):
if type is None:
return ivy.default_dtype()
if isinstance(type, str) and type in invalid_dtypes:
type = {
"uint16": native_uint8,
"uint32": native_uint8,
"uint64": native_uint8,
"bfloat16": native_float16,
}[type]
return ivy.as_ivy_dtype(type) if not as_native else ivy.as_native_dtype(type)
backend = "paddle"
# local sub-modules
from . import activations
from .activations import *
from . import creation
from .creation import *
from . import data_type
from .data_type import *
from . import device
from .device import *
from . import elementwise
from .elementwise import *
from . import general
from .general import *
from . import gradients
from .gradients import *
from . import layers
from .layers import *
from . import linear_algebra as linalg
from .linear_algebra import *
from . import manipulation
from .manipulation import *
from . import random
from .random import *
from . import searching
from .searching import *
from . import set
from .set import *
from . import sorting
from .sorting import *
from . import statistical
from .statistical import *
from . import utility
from .utility import *
from . import experimental
from .experimental import *
from . import control_flow_ops
from .control_flow_ops import *
from . import module
from .module import *
# sub-backends
from . import sub_backends
from .sub_backends import *
NativeModule = paddle.nn.Layer
| ivy/ivy/functional/backends/paddle/__init__.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/__init__.py",
"repo_id": "ivy",
"token_count": 3374
} | 21 |
# global
from typing import Callable
import paddle
# local
import ivy
from ivy.func_wrapper import inputs_to_native_arrays
from ivy.functional.ivy.gradients import (
_flatten_containers,
_rebuild_flattened_containers,
)
from ivy.utils.exceptions import IvyNotImplementedException
def bind_custom_gradient_function(func, custom_grad_fn):
class _CustomModule(paddle.autograd.PyLayer):
@staticmethod
def forward(ctx, x):
ret = ivy.to_native(func(x), nested=True, include_derived=True)
ctx.save_for_backward(x, ret)
return ret
@staticmethod
def backward(ctx, upstream):
grads = custom_grad_fn(
*ivy.to_ivy(
(ctx.saved_tensor(), upstream), nested=True, include_derived=True
)
)
return ivy.to_native(grads, nested=True, include_derived=True)
custom_module = _CustomModule.apply
return inputs_to_native_arrays(custom_module)
def vjp(func: Callable, *primals):
flattened_primals, ret_idxs = _flatten_containers(primals)
def grad_fn(*x_in):
return _flatten_containers(
ivy.to_native(
func(
*ivy.to_ivy(
_rebuild_flattened_containers(x_in, ret_idxs), nested=True
)
),
nested=True,
include_derived=True,
)
)[0]
# primals_out = _rebuild_flattened_containers(
# grad_fn(*ivy.to_ivy(flattened_primals, nested=True)), ret_idxs
# )
primals_out = func(*ivy.to_ivy(primals, nested=True))
def vjpfun(x_in):
_, vjp_result = ivy.to_ivy(
paddle.incubate.autograd.vjp(
grad_fn,
ivy.to_native(flattened_primals, nested=True),
ivy.to_native(_flatten_containers(x_in)[0], nested=True),
)
)
return ivy.to_ivy(
_rebuild_flattened_containers(vjp_result, ret_idxs),
nested=True,
include_derived=True,
)
return (ivy.to_ivy(primals_out, nested=True, include_derived=True), vjpfun)
def jvp(func: Callable, primals, tangents):
raise IvyNotImplementedException()
| ivy/ivy/functional/backends/paddle/experimental/gradients.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/experimental/gradients.py",
"repo_id": "ivy",
"token_count": 1146
} | 22 |
# global
import paddle
from typing import Union, Optional, Tuple, Literal, List, NamedTuple, Sequence
from collections import namedtuple
# local
import ivy
from ivy import inf
from ivy.utils.exceptions import IvyNotImplementedException
import ivy.functional.backends.paddle as paddle_backend
from . import backend_version
from ivy.func_wrapper import (
with_unsupported_device_and_dtypes,
with_unsupported_dtypes,
with_supported_dtypes,
with_supported_device_and_dtypes,
)
from .elementwise import _elementwise_helper
# Array API Standard #
# -------------------#
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"int8",
"int16",
"int32",
"int64",
"uint8",
"float16",
"complex",
"bool",
)
}
},
backend_version,
)
def cholesky(
x: paddle.Tensor, /, *, upper: bool = False, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
return paddle.linalg.cholesky(x, upper=upper)
def cross(
x1: paddle.Tensor,
x2: paddle.Tensor,
/,
*,
axisa: int = -1,
axisb: int = -1,
axisc: int = -1,
axis: Optional[int] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
def _cross(x1, x2, axisa, axisb, axisc, axis):
if axis is not None:
return paddle.cross(x1, x2, axis=axis)
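        # no explicit axis: emulate numpy's axisa/axisb/axisc by moving each
        # operand's vector dimension to position 1 and the result back to axisc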
x1 = paddle.moveaxis(x1, axisa, 1)
x2 = paddle.moveaxis(x2, axisb, 1)
ret = paddle.cross(x1, x2)
return paddle.moveaxis(ret, 1, axisc)
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
if x1.dtype in [
paddle.int8,
paddle.int16,
paddle.uint8,
paddle.float16,
paddle.complex64,
paddle.complex128,
paddle.bool,
]:
if paddle.is_complex(x1):
return paddle.complex(
_cross(x1.real(), x2.real(), axisa, axisb, axisc, axis),
                _cross(x1.imag(), x2.imag(), axisa, axisb, axisc, axis),
)
return _cross(
x1.cast("float32"),
x2.cast("float32"),
axisa,
axisb,
axisc,
axis,
).cast(ret_dtype)
return _cross(x1, x2, axisa, axisb, axisc, axis)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("complex64", "complex128")}},
backend_version,
)
def det(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if x.dtype in [
paddle.int8,
paddle.int16,
paddle.int32,
paddle.int64,
paddle.uint8,
paddle.float16,
paddle.bool,
]:
ret = paddle.linalg.det(x.cast("float32")).cast(x.dtype)
else:
ret = paddle.linalg.det(x)
if x.ndim == 2:
ret = paddle_backend.squeeze(ret, axis=0)
return ret
def diagonal(
x: paddle.Tensor,
/,
*,
offset: int = 0,
axis1: int = -2,
axis2: int = -1,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if x.dtype in [
paddle.int8,
paddle.int16,
paddle.uint8,
paddle.float16,
paddle.complex64,
paddle.complex128,
]:
if paddle.is_complex(x):
return paddle.complex(
paddle.diagonal(x.real(), offset=offset, axis1=axis1, axis2=axis2),
paddle.diagonal(x.imag(), offset=offset, axis1=axis1, axis2=axis2),
)
return paddle.diagonal(
x.cast("float32"), offset=offset, axis1=axis1, axis2=axis2
).cast(x.dtype)
return paddle.diagonal(x, offset=offset, axis1=axis1, axis2=axis2)
def eigh(
x: paddle.Tensor,
/,
*,
UPLO: str = "L",
out: Optional[paddle.Tensor] = None,
) -> Tuple[paddle.Tensor]:
result_tuple = NamedTuple(
"eigh", [("eigenvalues", paddle.Tensor), ("eigenvectors", paddle.Tensor)]
)
eigenvalues, eigenvectors = paddle.linalg.eigh(x, UPLO=UPLO)
return result_tuple(eigenvalues, eigenvectors)
def eigvalsh(
x: paddle.Tensor,
/,
*,
UPLO: str = "L",
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle.linalg.eigvalsh(x, UPLO=UPLO)
def inner(
x1: paddle.Tensor, x2: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
ret_dtype = x1.dtype
if x1.dtype in [
paddle.int8,
paddle.int16,
paddle.int32,
paddle.int64,
paddle.uint8,
paddle.float16,
paddle.bool,
]:
x1, x2 = x1.cast("float32"), x2.cast("float32")
return paddle.inner(x1, x2).squeeze().cast(ret_dtype)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("complex64", "complex128")}},
backend_version,
)
def inv(
x: paddle.Tensor,
/,
*,
adjoint: bool = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
ret_dtype = x.dtype
if x.dtype in [
paddle.int8,
paddle.int16,
paddle.int32,
paddle.int64,
paddle.uint8,
paddle.float16,
paddle.bool,
]:
x = x.cast("float32")
if adjoint:
x = paddle.moveaxis(x, -2, -1).conj()
return paddle.inverse(x).cast(ret_dtype)
def matmul(
x1: paddle.Tensor,
x2: paddle.Tensor,
/,
*,
transpose_a: bool = False,
transpose_b: bool = False,
adjoint_a: bool = False,
adjoint_b: bool = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
ret_dtype = x1.dtype
if x1.dtype in [
paddle.int8,
paddle.int16,
paddle.int32,
paddle.int64,
paddle.uint8,
paddle.float16,
paddle.bool,
paddle.bfloat16,
]:
x1, x2 = x1.cast("float32"), x2.cast("float32")
if adjoint_a:
x1 = paddle.moveaxis(x1, -2, -1).conj()
if adjoint_b:
x2 = paddle.moveaxis(x2, -2, -1).conj()
ret = paddle.matmul(x1, x2, transpose_x=transpose_a, transpose_y=transpose_b).cast(
ret_dtype
)
# handle case where ret should be 0d.
if x1.ndim == 1 and x2.ndim == 1:
ret_dtype = ret.dtype
if ret_dtype in [paddle.int16]:
ret = ret.cast(paddle.int32)
return ret.squeeze().astype(ret_dtype)
return ret
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("complex64", "complex128")}},
backend_version,
)
def matrix_norm(
x: paddle.Tensor,
/,
*,
ord: Union[int, float, Literal[inf, -inf, "fro", "nuc"]] = "fro",
axis: Tuple[int, int] = (-2, -1),
keepdims: bool = False,
dtype: Optional[paddle.dtype] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if dtype is not None:
x = ivy.astype(x, dtype=dtype)
axis_ = list(axis) # paddle.moveaxis doesn't support tuple axes
if ord == "nuc":
x = paddle.moveaxis(x, axis_, [-2, -1])
# backend implementation is used here instead of native implementation
# because native implementation causes issues when the return should be
# a scalar which is solved in the backend implementation
ret = paddle_backend.sum(
paddle_backend.svd(x)[1],
axis=-1,
)
elif ord == 1:
ret = paddle_backend.max(
paddle.sum(paddle_backend.abs(x), axis=axis[0], keepdim=True),
axis=axis,
keepdims=keepdims,
)
elif ord == -1:
ret = paddle_backend.min(
paddle.sum(paddle_backend.abs(x), axis=axis[0], keepdim=True),
axis=axis,
keepdims=keepdims,
)
elif ord == 2:
x = paddle.moveaxis(x, axis_, [-2, -1])
ret = paddle_backend.max(
paddle_backend.svd(x)[1],
axis=-1,
)
elif ord == -2:
x = paddle.moveaxis(x, axis_, [-2, -1])
ret = paddle_backend.min(
paddle_backend.svd(x)[1],
axis=-1,
)
elif ord == float("inf"):
ret = paddle_backend.max(
paddle.sum(paddle.abs(x), axis=axis[1], keepdim=True),
axis=axis,
keepdims=keepdims,
)
elif ord == float("-inf"):
ret = paddle_backend.min(
paddle.sum(paddle.abs(x), axis=axis[1], keepdim=True),
axis=axis,
keepdims=keepdims,
)
else:
ret = paddle.linalg.norm(x, p=ord, axis=axis, keepdim=keepdims)
if x.ndim == 2 and not keepdims:
ret = paddle.squeeze(ret)
    elif keepdims and ord in ["nuc", -2, 2]:
        # only these norms, because they are computed via the SVD
        for dim in axis:
            # although expand_dims supports tuple axes, we loop over the axes
            # because it faces problems when the input is a scalar
            ret = paddle_backend.expand_dims(ret, axis=dim % x.ndim)
return ret
def eig(
x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> Tuple[paddle.Tensor]:
result_tuple = NamedTuple(
"eig", [("eigenvalues", paddle.Tensor), ("eigenvectors", paddle.Tensor)]
)
eigenvalues, eigenvectors = paddle.linalg.eig(x)
return result_tuple(eigenvalues, eigenvectors)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("complex64", "complex128")}},
backend_version,
)
def matrix_power(
x: paddle.Tensor, n: int, /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
return paddle.linalg.matrix_power(x, n)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("complex64", "complex128")}},
backend_version,
)
def matrix_rank(
x: paddle.Tensor,
/,
*,
atol: Optional[Union[float, Tuple[float]]] = None,
rtol: Optional[Union[float, Tuple[float]]] = None,
hermitian: bool = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if (x.ndim < 2) or (0 in x.shape):
return paddle.to_tensor(0).squeeze().astype(x.dtype)
# we don't use the native matrix_rank function because the behaviour of the
# tolerance argument is difficult to unify
if hermitian:
svd_values = paddle_backend.abs(paddle_backend.eigvalsh(x))
else:
svd_values = paddle_backend.svd(x)[1]
sigma = paddle_backend.max(svd_values, axis=-1, keepdims=False)
atol = (
atol if atol is not None else ivy.finfo(x.dtype).eps * max(x.shape[-2:]) * sigma
)
rtol = rtol if rtol is not None else 0.0
tol = paddle_backend.maximum(atol, paddle_backend.multiply(rtol, sigma))
# make sure it's broadcastable again with svd_values
tol = paddle_backend.expand_dims(tol, axis=-1)
ret = paddle.count_nonzero(paddle_backend.greater(svd_values, tol), axis=-1)
if x.ndim == 2 and tol.ndim < 2:
# to fix the output shape when input is unbatched
# and tol is batched
ret = paddle_backend.squeeze(ret, axis=None)
return ret
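# Illustrative sketch (not part of the backend API): the default `atol` above
# is the usual rank cutoff eps * max(m, n) * sigma_max. Assuming a hypothetical
# float32 matrix with one zero singular value:
#
#   >>> x = paddle.to_tensor([[1.0, 0.0, 0.0],
#   ...                       [0.0, 1.0, 0.0],
#   ...                       [0.0, 0.0, 0.0]])
#   >>> matrix_rank(x)  # the zero singular value falls below tol -> rank 2
#
# Only the resulting rank of 2 is the point here; the printed tensor form
# depends on the paddle version.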
def matrix_transpose(
x: paddle.Tensor,
/,
*,
perm: Optional[Union[Tuple[int], List[int]]] = None,
conjugate: bool = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if conjugate:
x = paddle.conj(x)
perm = list(range(x.ndim))
perm[-1], perm[-2] = perm[-2], perm[-1]
if x.dtype in [paddle.int8, paddle.int16, paddle.uint8]:
return paddle.transpose(x.cast("float32"), perm=perm).cast(x.dtype)
return paddle.transpose(x, perm=perm)
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": ("int32", "int64", "float64", "complex128" "float32", "complex64")
}
},
backend_version,
)
def outer(
x1: paddle.Tensor, x2: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
ret_dtype = x1.dtype
if x1.dtype in [
paddle.int8,
paddle.int16,
paddle.int32,
paddle.int64,
paddle.uint8,
paddle.float16,
paddle.bool,
]:
x1, x2 = x1.cast("float32"), x2.cast("float32")
return paddle.outer(x1, x2).cast(ret_dtype)
def pinv(
x: paddle.Tensor,
/,
*,
rtol: Optional[Union[float, Tuple[float]]] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if rtol is None:
return paddle.linalg.pinv(x)
return paddle.linalg.pinv(x, rcond=rtol)
def tensorsolve(
x1: paddle.Tensor,
x2: paddle.Tensor,
/,
*,
axes: Optional[Union[int, Tuple[List[int], List[int]]]] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
# Implemented as a composite function in ivy.functional.ivy.linear_algebra
raise IvyNotImplementedException()
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("complex64", "complex128")}},
backend_version,
)
def qr(
x: paddle.Tensor,
/,
*,
mode: str = "reduced",
out: Optional[Tuple[paddle.Tensor, paddle.Tensor]] = None,
) -> Tuple[paddle.Tensor, paddle.Tensor]:
res = namedtuple("qr", ["Q", "R"])
q, r = paddle.linalg.qr(x, mode=mode)
return res(q, r)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("complex64", "complex128")}},
backend_version,
)
def slogdet(
x: paddle.Tensor,
/,
) -> Tuple[paddle.Tensor, paddle.Tensor]:
results = NamedTuple(
"slogdet", [("sign", paddle.Tensor), ("logabsdet", paddle.Tensor)]
)
sign, logabsdet = paddle.linalg.slogdet(x)
if x.ndim == 2:
sign, logabsdet = paddle_backend.squeeze(sign, axis=0), paddle_backend.squeeze(
logabsdet, axis=0
)
return results(sign, logabsdet)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("complex64", "complex128")}},
backend_version,
)
def solve(
x1: paddle.Tensor,
x2: paddle.Tensor,
/,
*,
adjoint: bool = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if adjoint:
x1 = paddle.moveaxis(x1, -2, -1).conj()
expanded_last = False
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
if len(x2.shape) <= 1:
if x2.shape[-1] == x1.shape[-1]:
expanded_last = True
x2 = paddle.unsqueeze(x2, axis=1)
for i in range(len(x1.shape) - len(x2.shape)):
x2 = paddle.unsqueeze(x2, axis=0)
ret = paddle.linalg.solve(x1, x2)
if expanded_last:
ret = paddle.squeeze(ret, axis=-1)
return ret
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, backend_version)
def svd(
x: paddle.Tensor, /, *, full_matrices: bool = True, compute_uv: bool = True
) -> Union[paddle.Tensor, Tuple[paddle.Tensor, ...]]:
ret = paddle.linalg.svd(x, full_matrices=full_matrices)
if compute_uv:
results = namedtuple("svd", "U S Vh")
return results(*ret)
else:
results = namedtuple("svd", "S")
return results(ret[1])
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("complex64", "complex128")}},
backend_version,
)
def svdvals(
x: paddle.Tensor,
/,
*,
driver: Optional[str] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
# TODO:handling the driver argument
return paddle_backend.svd(x)[1]
@with_supported_dtypes(
{"2.6.0 and below": ("complex", "float32", "float64")}, backend_version
)
def tensordot(
x1: paddle.Tensor,
x2: paddle.Tensor,
/,
*,
axes: Union[int, Tuple[List[int], List[int]]] = 2,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
ret = paddle.tensordot(x1, x2, axes=axes)
return ret.squeeze() if x1.ndim == axes else ret
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"int8",
"int16",
"unsigned",
"float16",
"complex",
"bool",
)
}
},
backend_version,
)
def trace(
x: paddle.Tensor,
/,
*,
offset: int = 0,
axis1: int = 0,
axis2: int = 1,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
ret = paddle.trace(x, offset=offset, axis1=axis1, axis2=axis2)
return ret.squeeze() if x.ndim <= 2 else ret
def vecdot(
x1: paddle.Tensor,
x2: paddle.Tensor,
/,
*,
axis: int = -1,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
axes = [axis % x1.ndim]
return paddle_backend.tensordot(x1, x2, axes=axes)
def vector_norm(
x: paddle.Tensor,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
ord: Union[int, float, Literal[inf, -inf]] = 2,
dtype: Optional[paddle.dtype] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if dtype and x.dtype != dtype:
x = x.astype(dtype)
abs_x = paddle_backend.abs(x)
if ord == 0:
return paddle_backend.sum(
(abs_x != 0).astype(abs_x.dtype), axis=axis, keepdims=keepdims
)
elif ord == inf:
return paddle_backend.max(abs_x, axis=axis, keepdims=keepdims)
elif ord == -inf:
return paddle_backend.min(abs_x, axis=axis, keepdims=keepdims)
else:
return paddle_backend.pow(
paddle_backend.sum(
paddle_backend.pow(abs_x, ord),
axis=axis,
keepdims=keepdims,
),
(1.0 / ord),
)
# Extra #
# ----- #
@with_supported_dtypes(
{"2.6.0 and below": ("float16", "float32", "float64", "int32", "int64")},
backend_version,
)
def diag(
x: paddle.Tensor,
/,
*,
k: int = 0,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle.diag(x, offset=k)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("uint8", "int8", "int16", "complex64", "complex128")}},
backend_version,
)
def vander(
x: paddle.Tensor,
/,
*,
N: Optional[int] = None,
increasing: bool = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
N = ivy.default(N, x.shape[-1])
start, stop, step = N - 1, -1, -1
if increasing:
start, stop, step = 0, N, 1
return paddle.pow(
paddle.moveaxis(paddle.unsqueeze(x, 0), 0, 1),
paddle.arange(start, stop, step, dtype=x.dtype),
)
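# Illustrative sketch (hypothetical values): `vander` builds the Vandermonde
# matrix by broadcasting an elementwise power of x against the exponent range
# [N - 1, ..., 0] (or [0, ..., N - 1] when increasing=True):
#
#   >>> vander(paddle.to_tensor([1.0, 2.0, 3.0]), N=3)
#   # [[1., 1., 1.],
#   #  [4., 2., 1.],
#   #  [9., 3., 1.]]
#
# i.e. row i is [x_i**2, x_i**1, x_i**0] in the default decreasing order.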
@with_unsupported_dtypes(
{"2.6.0 and below": ("unsigned", "int8", "int16", "float16")},
backend_version,
)
def vector_to_skew_symmetric_matrix(
vector: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
batch_shape = vector.shape[:-1]
# BS x 3 x 1
vector_expanded = paddle.unsqueeze(vector, -1)
# BS x 1 x 1
a1s = vector_expanded[..., 0:1, :]
a2s = vector_expanded[..., 1:2, :]
a3s = vector_expanded[..., 2:3, :]
# BS x 1 x 1
zs = paddle.zeros(batch_shape + [1, 1], dtype=vector.dtype)
# BS x 1 x 3
row1 = paddle.concat((zs, -a3s, a2s), -1)
row2 = paddle.concat((a3s, zs, -a1s), -1)
row3 = paddle.concat((-a2s, a1s, zs), -1)
# BS x 3 x 3
return paddle.concat((row1, row2, row3), -2)
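# Illustrative sketch (hypothetical values): for a single vector a = [a1, a2, a3],
# the concatenated rows form the standard skew-symmetric matrix A with
# A @ b == cross(a, b):
#
#   >>> vector_to_skew_symmetric_matrix(paddle.to_tensor([1.0, 2.0, 3.0]))
#   # [[ 0., -3.,  2.],
#   #  [ 3.,  0., -1.],
#   #  [-2.,  1.,  0.]]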
| ivy/ivy/functional/backends/paddle/linear_algebra.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/linear_algebra.py",
"repo_id": "ivy",
"token_count": 9390
} | 23 |
# global
from typing import Union, Optional
import tensorflow as tf
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from ivy import promote_types_of_inputs
from . import backend_version
def abs(
x: Union[float, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if not tf.is_tensor(x):
x = tf.convert_to_tensor(x)
    if "uint" in x.dtype.name or "bool" in x.dtype.name:
return x
return tf.abs(x)
@with_unsupported_dtypes({"2.15.0 and below": ("unsigned", "bool")}, backend_version)
def acos(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.acos(x)
def acosh(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.acosh(x)
def add(
x1: Union[float, tf.Tensor, tf.Variable],
x2: Union[float, tf.Tensor, tf.Variable],
/,
*,
alpha: Optional[Union[int, float]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
if x1.dtype.is_bool and x2.dtype.is_bool:
return tf.math.logical_or(x1, x2)
if alpha not in (1, None):
with ivy.ArrayMode(False):
x2 = multiply(x2, alpha)
return tf.add(x1, x2)
def asin(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.asin(x)
def asinh(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.asinh(x)
def atan(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.atan(x)
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def atan2(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return tf.math.atan2(x1, x2)
def atanh(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.atanh(x)
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def bitwise_and(
x1: Union[int, tf.Tensor, tf.Variable],
x2: Union[int, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2, array_api_promotion=True)
if ("int" not in str(x1.dtype)) & ("int" not in str(x2.dtype)):
return tf.math.logical_and(x1, x2)
else:
return tf.bitwise.bitwise_and(x1, x2)
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def bitwise_invert(
x: Union[int, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if "int" not in str(x.dtype):
return tf.logical_not(x)
else:
return tf.bitwise.invert(x)
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def bitwise_left_shift(
x1: Union[int, tf.Tensor, tf.Variable],
x2: Union[int, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2, array_api_promotion=True)
return tf.bitwise.left_shift(x1, x2)
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def bitwise_or(
x1: Union[int, tf.Tensor, tf.Variable],
x2: Union[int, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2, array_api_promotion=True)
if ("int" not in str(x1.dtype)) & ("int" not in str(x2.dtype)):
return tf.math.logical_or(x1, x2)
else:
return tf.bitwise.bitwise_or(x1, x2)
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def bitwise_right_shift(
x1: Union[int, tf.Tensor, tf.Variable],
x2: Union[int, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2, array_api_promotion=True)
return tf.bitwise.right_shift(x1, x2)
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def bitwise_xor(
x1: Union[int, tf.Tensor, tf.Variable],
x2: Union[int, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2, array_api_promotion=True)
if ("int" not in str(x1.dtype)) & ("int" not in str(x2.dtype)):
return tf.math.logical_xor(x1, x2)
else:
return tf.bitwise.bitwise_xor(x1, x2)
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def ceil(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if "int" in str(x.dtype):
return x
else:
return tf.math.ceil(x)
@with_unsupported_dtypes({"2.15.0 and below": ("integer",)}, backend_version)
def cos(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.cos(x)
@with_unsupported_dtypes({"2.15.0 and below": ("float16",)}, backend_version)
def cosh(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.cosh(x)
def divide(
x1: Union[float, tf.Tensor, tf.Variable],
x2: Union[float, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
ret = tf.experimental.numpy.divide(x1, x2)
if ivy.is_float_dtype(x1.dtype) or ivy.is_complex_dtype(x1.dtype):
ret = tf.cast(ret, dtype=x1.dtype)
else:
ret = tf.cast(ret, dtype=ivy.default_float_dtype(as_native=True))
return ret
def equal(
x1: Union[float, tf.Tensor, tf.Variable],
x2: Union[float, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return tf.math.equal(x1, x2)
@with_unsupported_dtypes({"2.15.0 and below": ("integer",)}, backend_version)
def exp(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.exp(x)
def exp2(
x: Union[tf.Tensor, tf.Variable, float, list, tuple],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.pow(2, x, name=None)
@with_supported_dtypes({"2.15.0 and below": ("float", "complex")}, backend_version)
def expm1(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.expm1(x)
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def floor(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if "int" in str(x.dtype):
return x
else:
return tf.math.floor(x)
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def floor_divide(
x1: Union[float, tf.Tensor, tf.Variable],
x2: Union[float, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return tf.experimental.numpy.floor_divide(x1, x2)
@with_supported_dtypes({"2.15.0 and below": ("float",)}, backend_version)
def fmin(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = promote_types_of_inputs(x1, x2)
x1 = tf.where(tf.math.is_nan(x1), x2, x1)
x2 = tf.where(tf.math.is_nan(x2), x1, x2)
ret = tf.experimental.numpy.minimum(x1, x2)
return ret
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def greater(
x1: Union[float, tf.Tensor, tf.Variable],
x2: Union[float, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return tf.experimental.numpy.greater(x1, x2)
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def greater_equal(
x1: Union[float, tf.Tensor, tf.Variable],
x2: Union[float, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return tf.math.greater_equal(x1, x2)
def isfinite(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if ivy.is_int_dtype(x):
return tf.ones_like(x, tf.bool)
elif ivy.is_complex_dtype(x):
return tf.math.logical_and(
tf.math.is_finite(tf.math.real(x)), tf.math.is_finite(tf.math.imag(x))
)
else:
return tf.math.is_finite(x)
def isinf(
x: Union[tf.Tensor, tf.Variable],
/,
*,
detect_positive: bool = True,
detect_negative: bool = True,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if not ivy.is_complex_dtype(x):
if ivy.is_int_dtype(x):
return tf.zeros_like(x, tf.bool)
else:
if detect_negative and detect_positive:
return tf.math.is_inf(x)
elif detect_negative:
return tf.experimental.numpy.isneginf(x)
elif detect_positive:
return tf.experimental.numpy.isposinf(x)
return tf.zeros_like(x, tf.bool)
@with_unsupported_dtypes({"2.15.0 and below": ("complex", "bool")}, backend_version)
def isnan(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if ivy.is_int_dtype(x):
return tf.zeros_like(x, tf.bool)
else:
return tf.math.is_nan(x)
@with_unsupported_dtypes({"2.15.0 and below": ("unsigned",)}, backend_version)
def lcm(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = promote_types_of_inputs(x1, x2)
return tf.experimental.numpy.lcm(x1, x2)
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"bool",
"complex",
)
},
backend_version,
)
def less(
x1: Union[float, tf.Tensor, tf.Variable],
x2: Union[float, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return tf.math.less(x1, x2)
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def less_equal(
x1: Union[float, tf.Tensor, tf.Variable],
x2: Union[float, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return tf.math.less_equal(x1, x2)
@with_unsupported_dtypes(
{"2.15.0 and below": ("float16", "bfloat16", "integer")}, backend_version
)
def log(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.log(x)
def log10(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.log(x) / tf.math.log(tf.constant(10.0, x.dtype))
def log1p(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.log1p(x)
def log2(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.log(x) / tf.math.log(tf.constant(2.0, x.dtype))
@with_unsupported_dtypes({"2.15.0 and below": ("float16", "bfloat16")}, backend_version)
def logaddexp(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return tf.experimental.numpy.logaddexp(x1, x2)
@with_unsupported_dtypes({"2.15.0 and below": ("float16",)}, backend_version)
def real(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.real(x)
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
)
},
backend_version,
)
def logaddexp2(
x1: Union[tf.Tensor, tf.Variable, float, list, tuple],
x2: Union[tf.Tensor, tf.Variable, float, list, tuple],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = promote_types_of_inputs(x1, x2)
if not ivy.is_float_dtype(x1):
x1 = tf.cast(x1, ivy.default_float_dtype(as_native=True))
x2 = tf.cast(x2, ivy.default_float_dtype(as_native=True))
amax = ivy.maximum(x1, x2)
delta = x1 - x2
return ivy.where(
ivy.isnan(delta),
x1 + x2,
amax + ivy.log1p(ivy.exp2(-ivy.abs(delta))) / ivy.log(2.0).astype(amax.dtype),
)
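# Illustrative sketch (hypothetical values): the expression above is the
# numerically stable form of log2(2**x1 + 2**x2), i.e.
# max(x1, x2) + log2(1 + 2**(-|x1 - x2|)); the division by log(2) converts
# log1p's natural-log result into base 2. For example:
#
#   >>> logaddexp2(tf.constant(3.0), tf.constant(3.0))  # log2(8 + 8) == 4.0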
def logical_and(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.logical_and(tf.cast(x1, tf.bool), tf.cast(x2, tf.bool))
def logical_not(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.logical_not(tf.cast(x, tf.bool))
def logical_or(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.logical_or(tf.cast(x1, tf.bool), tf.cast(x2, tf.bool))
def logical_xor(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.logical_xor(tf.cast(x1, tf.bool), tf.cast(x2, tf.bool))
@with_unsupported_dtypes({"2.15.0 and below": ("bool",)}, backend_version)
def multiply(
x1: Union[float, tf.Tensor, tf.Variable],
x2: Union[float, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return tf.math.multiply(x1, x2)
@with_unsupported_dtypes({"2.15.0 and below": ("bool", "unsigned")}, backend_version)
def negative(
x: Union[float, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.negative(x)
def not_equal(
x1: Union[float, tf.Tensor, tf.Variable],
x2: Union[float, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return tf.math.not_equal(x1, x2)
def positive(
x: Union[float, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.experimental.numpy.positive(x)
@with_unsupported_dtypes({"2.15.0 and below": ("bool", "unsigned")}, backend_version)
def pow(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[int, float, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if ivy.is_complex_dtype(x1) and ivy.any(ivy.isinf(x2)):
ret = tf.experimental.numpy.power(x1, x2)
return tf.where(
ivy.isinf(x2), ivy.nan + ivy.nan * 1j if x2 < 0 else -0 * 1j, ret
)
if ivy.is_complex_dtype(x2) and ivy.any(x1 == 0):
ret = tf.experimental.numpy.power(x1, x2)
return tf.where(x1 == 0, ivy.nan + ivy.nan * 1j, ret)
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
if ivy.is_int_dtype(x1) and ivy.any(x2 < 0):
return tf.cast(
tf.experimental.numpy.power(tf.cast(x1, tf.float32), x2),
x1.dtype,
)
return tf.experimental.numpy.power(x1, x2)
@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16", "complex")}, backend_version)
def remainder(
x1: Union[float, tf.Tensor, tf.Variable],
x2: Union[float, tf.Tensor, tf.Variable],
/,
*,
modulus: bool = True,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
if not modulus:
res = x1 / x2
res_floored = tf.where(res >= 0, tf.math.floor(res), tf.math.ceil(res))
diff = res - res_floored
diff, x2 = ivy.promote_types_of_inputs(diff, x2)
return tf.cast(tf.round(diff * x2), x1.dtype)
return tf.experimental.numpy.remainder(x1, x2)
@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16", "complex")}, backend_version)
def round(
x: Union[tf.Tensor, tf.Variable],
/,
*,
decimals: int = 0,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if "int" in str(x.dtype):
return x
else:
if decimals == 0:
return tf.cast(tf.round(x), x.dtype)
ret_dtype = x.dtype
factor = tf.constant(10**decimals, dtype=ret_dtype)
factor_deno = tf.where(
tf.math.is_finite(factor), factor, tf.constant(1, dtype=ret_dtype)
)
return tf.cast(tf.round(x * factor) / factor_deno, ret_dtype)
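# Illustrative sketch (hypothetical values): for nonzero `decimals` the
# implementation scales by 10**decimals, rounds, then divides by the same
# factor; `factor_deno` keeps the division finite if 10**decimals overflows
# the dtype:
#
#   >>> round(tf.constant(3.14159), decimals=2)  # -> 3.14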
@with_unsupported_dtypes({"2.15.0 and below": ("bool", "unsigned")}, backend_version)
def sign(
x: Union[tf.Tensor, tf.Variable],
/,
*,
np_variant: Optional[bool] = True,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.sign(x)
def sin(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.sin(x)
def sinh(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.sinh(x)
def sqrt(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.sqrt(x)
def square(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.square(x)
def subtract(
x1: Union[float, tf.Tensor, tf.Variable],
x2: Union[float, tf.Tensor, tf.Variable],
/,
*,
alpha: Optional[Union[int, float]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
if alpha not in (1, None):
ivy.set_array_mode(False)
x2 = multiply(x2, alpha)
ivy.unset_array_mode()
return tf.subtract(x1, x2)
def tan(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.tan(x)
def tanh(
x: Union[tf.Tensor, tf.Variable],
/,
*,
complex_mode="jax",
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.tanh(x)
def trapz(
y: Union[tf.Tensor, tf.Variable],
/,
*,
x: Optional[Union[tf.Tensor, tf.Variable]] = None,
dx: float = 1.0,
axis: int = -1,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
    # TODO: Implement purely in tensorflow
    pass
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def trunc(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
ret = x
if not ivy.is_array(x):
raise ivy.utils.exceptions.IvyException("Input must be array")
elif "int" not in str(x.dtype):
if ret.get_shape().ndims != 0:
ret = tf.tensor_scatter_nd_update(
x, tf.where(tf.greater_equal(x, 0)), tf.math.floor(x[x >= 0])
)
ret = tf.tensor_scatter_nd_update(
ret, tf.where(tf.less(x, 0)), tf.math.ceil(x[x < 0])
)
else:
ret = (tf.math.floor if ret >= 0 else tf.math.ceil)(ret)
return ret
# Extra #
# ------#
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def erf(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.erf(x)
@with_unsupported_dtypes({"2.15.0 and below": ("complex", "bool")}, backend_version)
def maximum(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
use_where: bool = True,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return tf.math.maximum(x1, x2)
@with_unsupported_dtypes({"2.15.0 and below": ("complex", "bool")}, backend_version)
def minimum(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
use_where: bool = True,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return tf.math.minimum(x1, x2)
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
)
},
backend_version,
)
def reciprocal(
x: Union[float, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if x.dtype.is_integer:
x = tf.cast(x, tf.float32)
return tf.math.reciprocal(x)
@with_supported_dtypes({"2.15.0 and below": ("float",)}, backend_version)
def deg2rad(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
radians = x * ivy.pi / 180.0
return radians
@with_supported_dtypes({"2.15.0 and below": ("float",)}, backend_version)
def rad2deg(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.experimental.numpy.rad2deg(x)
def isreal(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.experimental.numpy.isreal(x)
@with_unsupported_dtypes(
{"2.15.0 and below": ("uint8", "uint16", "uint32", "uint64", "complex", "bool")},
backend_version,
)
def fmod(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = promote_types_of_inputs(x1, x2)
# tf.math.floormod returns wrong results
res = tf.experimental.numpy.remainder(tf.math.abs(x1), tf.math.abs(x2))
return tf.where(x1 < 0, -res, res)
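# Illustrative sketch (hypothetical values): fmod follows the C convention,
# so the result takes the sign of the dividend x1, unlike floormod, which
# takes the sign of the divisor:
#
#   >>> fmod(tf.constant(-7.0), tf.constant(3.0))              # -> -1.0
#   >>> tf.math.floormod(tf.constant(-7.0), tf.constant(3.0))  # -> 2.0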
@with_unsupported_dtypes(
{"2.15.0 and below": ("uint8", "uint16", "uint32", "uint64")}, backend_version
)
def gcd(
x1: Union[tf.Tensor, tf.Variable, int, list, tuple],
x2: Union[tf.Tensor, tf.Variable, float, list, tuple],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = promote_types_of_inputs(x1, x2)
return tf.experimental.numpy.gcd(x1, x2)
gcd.support_native_out = False
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"uint8",
"uint16",
"uint32",
"uint64",
"bfloat16",
"int32",
)
},
backend_version,
)
def angle(
input: Union[tf.Tensor, tf.Variable],
/,
*,
deg: Optional[bool] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if deg:
return tf.math.angle(input, name=None) * (180 / tf.experimental.numpy.pi)
else:
return tf.math.angle(input, name=None)
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"uint8",
"uint16",
"uint32",
"uint64",
"bfloat16",
"int32",
)
},
backend_version,
)
def imag(
val: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.imag(val, name=None)
def nan_to_num(
x: Union[tf.Tensor, tf.Variable],
/,
*,
copy: bool = True,
nan: Union[float, int] = 0.0,
posinf: Optional[Union[float, int]] = None,
neginf: Optional[Union[float, int]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
posinf = posinf if posinf is not None else x.dtype.max
neginf = neginf if neginf is not None else x.dtype.min
posinf = tf.constant(posinf, x.dtype)
neginf = tf.constant(neginf, x.dtype)
nan = tf.constant(nan, x.dtype)
ret = tf.where(tf.math.is_nan(x), nan, x)
ret = tf.where(tf.math.logical_and(tf.math.is_inf(ret), ret > 0), posinf, ret)
ret = tf.where(tf.math.logical_and(tf.math.is_inf(ret), ret < 0), neginf, ret)
if copy:
return ret
else:
x = ret
return x
| ivy/ivy/functional/backends/tensorflow/elementwise.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/elementwise.py",
"repo_id": "ivy",
"token_count": 13007
} | 24 |
# global
from typing import Optional, Tuple, Union
import math
import torch
# local
import ivy
from ivy.func_wrapper import (
with_unsupported_dtypes,
with_unsupported_device_and_dtypes,
)
from .. import backend_version
# noinspection PyProtectedMember
# Array API Standard #
# -------------------#
@with_unsupported_device_and_dtypes(
{"2.2 and below": {"cpu": ("float16",)}},
backend_version,
)
def kaiser_window(
window_length: int,
periodic: bool = True,
beta: float = 12.0,
*,
dtype: Optional[torch.dtype] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.kaiser_window(
window_length,
periodic,
beta,
dtype=dtype,
layout=torch.strided,
device=None,
requires_grad=False,
)
def hamming_window(
window_length: int,
/,
*,
periodic: bool = True,
alpha: float = 0.54,
beta: float = 0.46,
dtype: Optional[torch.dtype] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.hamming_window(
window_length,
periodic=periodic,
alpha=alpha,
beta=beta,
dtype=dtype,
layout=torch.strided,
device=None,
requires_grad=False,
)
def vorbis_window(
window_length: torch.tensor,
*,
dtype: torch.dtype = torch.float32,
out: Optional[torch.tensor] = None,
) -> torch.tensor:
return torch.tensor(
[
round(
math.sin(
(ivy.pi / 2) * (math.sin(ivy.pi * (i) / (window_length * 2)) ** 2)
),
8,
)
for i in range(1, window_length * 2)[0::2]
],
dtype=dtype,
)
vorbis_window.support_native_out = False
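# Illustrative sketch (hypothetical value): the comprehension evaluates
# w(t) = sin(pi/2 * sin(pi * t)**2) at the half-sample offsets
# t = (2k + 1) / (2N) for k = 0..N-1, giving a symmetric length-N window:
#
#   >>> vorbis_window(torch.tensor(4))
#   # approximately tensor([0.2280, 0.9737, 0.9737, 0.2280])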
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def hann_window(
size: int,
/,
*,
periodic: bool = True,
dtype: Optional[torch.dtype] = None,
out: Optional[torch.tensor] = None,
) -> torch.tensor:
return torch.hann_window(
size,
periodic=periodic,
dtype=dtype,
)
hann_window.support_native_out = False
def tril_indices(
n_rows: int,
n_cols: Optional[int] = None,
k: int = 0,
/,
*,
device: torch.device = None,
) -> Tuple[torch.Tensor, ...]:
n_cols = n_rows if n_cols is None else n_cols
if n_rows <= 0 or n_cols <= 0:
n_rows, n_cols = 0, 0
return tuple(
torch.tril_indices(
row=n_rows, col=n_cols, offset=k, dtype=torch.int64, device=device
)
)
def unsorted_segment_min(
data: torch.Tensor,
segment_ids: torch.Tensor,
num_segments: Union[int, torch.Tensor],
) -> torch.Tensor:
ivy.utils.assertions.check_unsorted_segment_valid_params(
data, segment_ids, num_segments
)
if data.dtype in [torch.float32, torch.float64, torch.float16, torch.bfloat16]:
init_val = torch.finfo(data.dtype).max
elif data.dtype in [torch.int32, torch.int64, torch.int8, torch.int16, torch.uint8]:
init_val = torch.iinfo(data.dtype).max
else:
raise TypeError("Unsupported data type")
res = torch.full(
(num_segments,) + data.shape[1:], init_val, dtype=data.dtype, device=data.device
)
for i in range(num_segments):
mask_index = segment_ids == i
if torch.any(mask_index):
res[i] = torch.min(data[mask_index], 0)[0]
return res
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def blackman_window(
size: int,
/,
*,
periodic: bool = True,
dtype: Optional[torch.dtype] = None,
out: Optional[torch.tensor] = None,
) -> torch.tensor:
return torch.blackman_window(
size,
periodic=periodic,
dtype=dtype,
)
blackman_window.support_native_out = False
def unsorted_segment_sum(
data: torch.Tensor,
segment_ids: torch.Tensor,
num_segments: Union[int, torch.Tensor],
) -> torch.Tensor:
    # Reuse the same parameter validation as unsorted_segment_min, since the
    # requirements on data, segment_ids and num_segments are identical.
ivy.utils.assertions.check_unsorted_segment_valid_params(
data, segment_ids, num_segments
)
res = torch.zeros(
(num_segments,) + data.shape[1:], dtype=data.dtype, device=data.device
)
for i in range(num_segments):
mask_index = segment_ids == i
if torch.any(mask_index):
res[i] = torch.sum(data[mask_index], dim=0)
return res
def trilu(
x: torch.Tensor,
/,
*,
k: int = 0,
upper: bool = True,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if upper:
return torch.triu(x, diagonal=k, out=out)
return torch.tril(x, diagonal=k, out=out)
trilu.support_native_out = True
def mel_weight_matrix(
num_mel_bins: int,
dft_length: int,
sample_rate: int,
lower_edge_hertz: float = 125.0,
upper_edge_hertz: float = 3000.0,
):
# transform the inputs to tensors
lower_edge_hertz = torch.tensor(lower_edge_hertz)
upper_edge_hertz = torch.tensor(upper_edge_hertz)
zero = torch.tensor(0.0)
    # hertz-to-mel transform
def hz_to_mel(f):
return 2595 * torch.log10(1 + f / 700)
nyquist_hz = sample_rate / 2
# define a range of frequencies in HZ
linear_freqs = torch.linspace(0, nyquist_hz, dft_length)[1:]
# transform the frequencies from HZ to mels
spec_bin_mels = hz_to_mel(linear_freqs).unsqueeze(1)
mel_edges = torch.linspace(
hz_to_mel(lower_edge_hertz), hz_to_mel(upper_edge_hertz), num_mel_bins + 2
)
# create overlapping frames of size 3
mel_edges = mel_edges.unfold(0, size=3, step=1)
lower_edge_mel, center_mel, upper_edge_mel = (
t.reshape((1, num_mel_bins)) for t in mel_edges.split(1, dim=1)
)
lower_slopes = (spec_bin_mels - lower_edge_mel) / (center_mel - lower_edge_mel)
upper_slopes = (upper_edge_mel - spec_bin_mels) / (upper_edge_mel - center_mel)
mel_weights = torch.maximum(zero, torch.minimum(lower_slopes, upper_slopes))
return torch.nn.functional.pad(mel_weights, (0, 0, 1, 0))
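# Illustrative usage sketch (hypothetical values): the returned matrix maps a
# magnitude spectrogram with `dft_length` frequency bins onto `num_mel_bins`
# triangular mel bands, so it can be applied with a single matmul:
#
#   >>> weights = mel_weight_matrix(80, 257, 16000)  # shape (257, 80)
#   >>> spectrogram = torch.rand(100, 257)           # (frames, fft bins)
#   >>> mel = spectrogram @ weights                  # shape (100, 80)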
def unsorted_segment_mean(
data: torch.Tensor,
segment_ids: torch.Tensor,
num_segments: Union[int, torch.Tensor],
) -> torch.Tensor:
ivy.utils.assertions.check_unsorted_segment_valid_params(
data, segment_ids, num_segments
)
# Initialize an array to store the sum of elements for each segment
segment_sum = torch.zeros(
(num_segments,) + data.shape[1:], dtype=data.dtype, device=data.device
)
# Initialize an array to keep track of the number of elements in each segment
counts = torch.zeros(num_segments, dtype=torch.int64, device=data.device)
for i in range(len(segment_ids)):
seg_id = segment_ids[i]
segment_sum[seg_id] += data[i]
counts[seg_id] += 1
return segment_sum / counts[:, None]
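# Illustrative sketch (hypothetical values): rows of `data` sharing a segment
# id are summed and divided by that segment's element count:
#
#   >>> data = torch.tensor([[1.0], [2.0], [3.0], [5.0]])
#   >>> segment_ids = torch.tensor([0, 0, 1, 1])
#   >>> unsorted_segment_mean(data, segment_ids, 2)
#   # tensor([[1.5], [4.0]])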
@with_unsupported_dtypes({"2.0.1 and below": "float16"}, backend_version)
def polyval(
coeffs: torch.Tensor,
x: torch.Tensor,
) -> torch.Tensor:
with ivy.PreciseMode(True):
promoted_type = ivy.promote_types(ivy.dtype(coeffs[0]), ivy.dtype(x[0]))
coeffs, x = ivy.promote_types_of_inputs(coeffs, x)
y = torch.zeros_like(x)
for coeff in coeffs:
y = y * x + coeff
if y.shape == (1,):
y = torch.unsqueeze(y, 0)
promoted_type = getattr(torch, promoted_type)
        y = y.to(dtype=promoted_type)
return y
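# Illustrative sketch (hypothetical values): the loop above is Horner's rule,
# evaluating coeffs[0] * x**(n-1) + ... + coeffs[n-1] with one multiply-add
# per coefficient:
#
#   >>> polyval(torch.tensor([2.0, 0.0, 1.0]), torch.tensor([3.0]))
#   # evaluates 2*3**2 + 0*3 + 1 == 19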
| ivy/ivy/functional/backends/torch/experimental/creation.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/experimental/creation.py",
"repo_id": "ivy",
"token_count": 3446
} | 25 |
# global
from typing import Optional, Union, Tuple, Sequence
import torch
# local
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from . import backend_version
import ivy
from ..statistical import _infer_dtype
from copy import deepcopy
@with_unsupported_dtypes(
{
"2.2 and below": (
"uint8",
"int8",
"int16",
"int32",
"int64",
"float16",
"bfloat16",
)
},
backend_version,
)
def histogram(
a: torch.Tensor,
/,
*,
bins: Optional[Union[int, torch.Tensor]] = None,
axis: Optional[int] = None,
extend_lower_interval: Optional[bool] = False,
extend_upper_interval: Optional[bool] = False,
dtype: Optional[torch.dtype] = None,
range: Optional[Tuple[float]] = None,
weights: Optional[torch.Tensor] = None,
density: Optional[bool] = False,
out: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor]:
min_a = torch.min(a)
max_a = torch.max(a)
if isinstance(bins, torch.Tensor) and range:
raise ivy.exceptions.IvyException(
"Must choose between specifying bins and range or bin edges directly"
)
if range:
bins = torch.linspace(
start=range[0], end=range[1], steps=bins + 1, dtype=a.dtype
)
range = None
elif isinstance(bins, int):
range = (min_a, max_a)
bins = torch.linspace(
start=range[0], end=range[1], steps=bins + 1, dtype=a.dtype
)
range = None
if bins.size()[0] < 2:
raise ivy.exceptions.IvyException("bins must have at least 1 bin (size > 1)")
bins_out = bins.clone()
if extend_lower_interval and min_a < bins[0]:
bins.data[0] = min_a
if extend_upper_interval and max_a > bins[-1]:
bins.data[-1] = max_a
if a.ndim > 0 and axis is not None:
inverted_shape_dims = list(torch.flip(torch.arange(a.ndim), dims=[0]))
if isinstance(axis, int):
axis = [axis]
shape_axes = 1
for dimension in axis:
inverted_shape_dims.remove(dimension)
inverted_shape_dims.append(dimension)
shape_axes *= a.shape[dimension]
a_along_axis_1d = (
a.permute(inverted_shape_dims).flatten().reshape((-1, shape_axes))
)
if weights is None:
ret = []
for a_1d in a_along_axis_1d:
ret_1d = torch.histogram(
a_1d,
bins=bins,
                    # TODO: forward `density` once this code path supports it
# density=density,
)[0]
ret.append(ret_1d.tolist())
else:
weights_along_axis_1d = (
weights.permute(inverted_shape_dims).flatten().reshape((-1, shape_axes))
)
ret = []
for a_1d, weights_1d in zip(a_along_axis_1d, weights_along_axis_1d):
ret_1d = torch.histogram(
a_1d,
bins=bins,
weight=weights_1d,
                    # TODO: forward `density` once this code path supports it
# density=density,
)[0]
ret.append(ret_1d.tolist())
out_shape = list(a.shape)
for dimension in sorted(axis, reverse=True):
del out_shape[dimension]
out_shape.insert(0, len(bins) - 1)
ret = torch.tensor(ret)
ret = ret.flatten()
index = torch.zeros(len(out_shape), dtype=int)
ret_shaped = torch.zeros(out_shape)
dim = 0
i = 0
if index.tolist() == (torch.tensor(out_shape) - 1).tolist():
ret_shaped.data[tuple(index)] = ret[i]
while index.tolist() != (torch.tensor(out_shape) - 1).tolist():
ret_shaped.data[tuple(index)] = ret[i]
dim_full_flag = False
while index[dim] == out_shape[dim] - 1:
index[dim] = 0
dim += 1
dim_full_flag = True
index[dim] += 1
i += 1
if dim_full_flag:
dim = 0
if index.tolist() == (torch.tensor(out_shape) - 1).tolist():
ret_shaped.data[tuple(index)] = ret[i]
ret = ret_shaped
else:
ret = torch.histogram(
a, bins=bins, range=range, weight=weights, density=density
)[0]
dtype = ivy.as_native_dtype(dtype)
if dtype:
ret = ret.type(dtype)
bins_out = bins_out.type(dtype)
    # TODO: returning the bin edges alongside the counts currently errors: return ret, bins_out
return ret
histogram.support_native_out = True
@with_unsupported_dtypes({"2.2 and below": ("float16", "bool")}, backend_version)
def median(
input: torch.Tensor,
/,
*,
axis: Optional[Union[Tuple[int], int]] = None,
keepdims: bool = False,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if isinstance(axis, tuple):
if len(axis) == 1:
axis = axis[0]
ret = quantile(
input,
0.5,
axis=axis,
keepdims=keepdims,
interpolation="midpoint",
)
if input.dtype in [torch.int64, torch.float64]:
ret = ret.to(torch.float64)
elif input.dtype in [torch.float16, torch.bfloat16]:
ret = ret.to(input.dtype)
else:
ret = ret.to(torch.float32)
return ret
median.support_native_out = False
@with_supported_dtypes({"2.2 and below": ("float",)}, backend_version)
def nanmean(
a: torch.Tensor,
/,
*,
axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: bool = False,
dtype: Optional[torch.dtype] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.nanmean(a, dim=axis, keepdim=keepdims, dtype=dtype, out=out)
nanmean.support_native_out = True
def nanmin(
a: torch.Tensor,
/,
*,
axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: Optional[bool] = False,
initial: Optional[Union[int, float, complex, ivy.Container]] = None,
where: Optional[torch.Tensor] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
nan_mask = torch.isnan(a)
if where is not None:
nan_mask = torch.logical_or(nan_mask, torch.logical_not(where))
a_copy = a.clone()
a_copy[nan_mask] = float("inf")
if axis is None:
result, _ = a_copy.min(), None
else:
result, _ = a_copy.min(dim=axis, keepdim=keepdims)
if initial is not None:
initial = torch.tensor(initial)
result = torch.minimum(result, initial)
return result
def nanprod(
a: torch.Tensor,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
dtype: Optional[torch.dtype] = None,
keepdims: Optional[bool] = False,
out: Optional[torch.Tensor] = None,
initial: Optional[Union[int, float, complex, ivy.Container]] = None,
where: Optional[torch.Tensor] = None,
) -> torch.Tensor:
dtype = ivy.as_native_dtype(dtype)
if dtype is None:
dtype = _infer_dtype(a.dtype)
if initial is None:
initial = 1
a = a.type(dtype)
a = torch.nan_to_num(a, nan=1.0)
if a.dtype == torch.float16:
a = a.type(torch.float32)
if axis == ():
return a.type(dtype)
if axis is None:
return torch.prod(input=a, out=out).type(dtype) * initial
if isinstance(axis, (tuple, list)):
for i in axis:
a = torch.prod(a, dim=i, keepdim=keepdims, out=out).type(dtype)
if a.dtype == torch.float16:
a = a.type(torch.float32)
return a.type(dtype) * initial
return torch.prod(a, dim=axis, keepdim=keepdims, out=out).type(dtype) * initial
nanprod.support_native_out = True
def _validate_quantile(q):
if isinstance(q, float):
q = torch.as_tensor(q)
if q.ndim == 1 and torch.numel(q) < 10:
for i in range(torch.numel(q)):
if not (0.0 <= q[i] <= 1.0):
return False
else:
if not (torch.all(q >= 0) and torch.all(q <= 1)):
return False
return True
def _to_positive_axis(axis, ndim):
if not isinstance(axis, (list, tuple)):
axis = [axis]
if len(axis) == 0:
raise ValueError("Axis can't be empty!")
if len(set(axis)) != len(axis):
raise ValueError("Duplicated axis!")
for i in range(len(axis)):
if not (isinstance(axis[i], int) and (ndim > axis[i] >= -ndim)):
raise ValueError("Axis must be int in range [-rank(x), rank(x))")
if axis[i] < 0:
axis[i] += ndim
return axis
def _handle_axis(a, q, fn, keepdims=False, axis=None):
nd = a.ndim
axis_arg = deepcopy(axis)
if axis is not None:
axis = _to_positive_axis(axis, nd)
if len(axis) == 1:
axis_arg = axis[0]
else:
keep = set(range(nd)) - set(axis)
nkeep = len(keep)
for i, s in enumerate(sorted(keep)):
a = torch.moveaxis(a, s, i)
a = a.view(
[
*a.shape[:nkeep],
-1,
]
)
axis_arg = -1
ret = fn(a, q, axis=axis_arg)
if keepdims:
if axis is None:
index_ret = (None,) * nd
else:
index_ret = tuple(None if i in axis else slice(None) for i in range(nd))
ret = ret[(Ellipsis,) + index_ret]
return ret
def _quantile(a, q, axis=None):
ret_dtype = a.dtype
if isinstance(q, float):
q = torch.as_tensor(q)
if isinstance(q, torch.Tensor) and q.ndim > 1:
raise ValueError("q argument must be a scalar or 1-dimensional!")
if axis is None:
axis = 0
a = a.flatten()
n = a.shape[axis]
indices = q * (n - 1)
    a = torch.sort(a, axis)[0]
indices_below = torch.floor(indices).to(torch.int64)
indices_upper = torch.ceil(indices).to(torch.int64)
weights = indices - indices_below.to(torch.float64)
indices_below = torch.clip(indices_below, 0, n - 1)
indices_upper = torch.clip(indices_upper, 0, n - 1)
tensor_upper = torch.index_select(a, 0, indices_upper)
tensor_below = torch.index_select(a, 0, indices_below)
pred = weights <= 0.5
out = torch.where(pred, tensor_below, tensor_upper)
return out.to(ret_dtype)
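# Illustrative sketch (hypothetical values): `_quantile` reproduces the
# JAX-style "nearest" behaviour by selecting whichever neighbouring order
# statistic is closer, with ties (weight == 0.5) resolving to the lower one:
#
#   >>> _quantile(torch.tensor([1.0, 2.0, 3.0, 4.0]), torch.tensor([0.4]))
#   # fractional index 0.4 * 3 == 1.2 -> weight 0.2 <= 0.5 -> picks 2.0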
def _compute_quantile_wrapper(
x, q, axis=None, keepdims=False, interpolation="linear", out=None
):
if not _validate_quantile(q):
raise ValueError("Quantiles must be in the range [0, 1]")
if interpolation in [
"linear",
"lower",
"higher",
"midpoint",
"nearest",
"nearest_jax",
]:
if interpolation == "nearest_jax":
return _handle_axis(x, q, _quantile, keepdims=keepdims, axis=axis)
else:
return torch.quantile(
x, q, dim=axis, keepdim=keepdims, interpolation=interpolation, out=out
)
else:
raise ValueError(
"Interpolation must be 'linear', 'lower', 'higher', 'midpoint' or 'nearest'"
)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, backend_version)
def quantile(
a: torch.Tensor,
q: Union[torch.Tensor, float],
/,
*,
axis: Optional[Union[Sequence[int], int]] = None,
keepdims: bool = False,
interpolation: str = "linear",
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
# added the nearest_jax mode to enable jax-like calculations for method="nearest"
return _compute_quantile_wrapper(
a,
q,
axis=axis,
keepdims=keepdims,
interpolation=interpolation,
out=out,
)
quantile.support_native_out = True
def corrcoef(
x: torch.Tensor,
/,
*,
y: Optional[torch.Tensor] = None,
rowvar: bool = True,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if y is None:
xarr = x
else:
axis = 0 if rowvar else 1
xarr = torch.concat([x, y], dim=axis)
xarr = xarr.T if not rowvar else xarr
return torch.corrcoef(xarr)
def _nanmedian(input, axis, keepdims):
dtype = input.dtype
temp = input.to(torch.float64)
num_dim = len(temp.size())
keepdim_shape = list(temp.size())
q = 0.5
axis = [axis] if isinstance(axis, int) else list(axis)
for i in axis:
keepdim_shape[i] = 1
axis = [num_dim + x if x < 0 else x for x in axis]
axis.sort()
dimension = len(temp.size())
while len(axis) > 0:
axis1 = axis[0]
for axis2 in range(axis1 + 1, dimension):
temp = torch.transpose(temp, axis1, axis2)
axis1 = axis2
axis = [x - 1 for x in axis]
axis.pop(0)
dimension = dimension - 1
temp = torch.flatten(temp, start_dim=dimension - len(axis))
ret = torch.nanquantile(temp, q, dim=-1, keepdim=keepdims, interpolation="midpoint")
if keepdims:
keepdim_shape = tuple(keepdim_shape)
ret = ret.reshape(keepdim_shape)
if dtype in [torch.int32, torch.int64, torch.float64]:
ret = torch.asarray(ret, dtype=torch.float64)
elif dtype in [torch.float16, torch.bfloat16]:
ret = torch.asarray(ret, dtype=torch.float16)
else:
ret = torch.asarray(ret, dtype=torch.float32)
return ret
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, backend_version)
def nanmedian(
input: torch.Tensor,
/,
*,
axis: Optional[Union[Tuple[int], int]] = None,
keepdims: bool = False,
overwrite_input: bool = False,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if overwrite_input:
copied_input = input.clone()
if axis is None:
copied_input = copied_input.flatten()
ret = torch.nanquantile(
copied_input.double(),
0.5,
dim=-1,
keepdim=keepdims,
interpolation="midpoint",
)
if input.dtype in [torch.int32, torch.int64, torch.float64]:
ret = ret.to(torch.float64)
elif input.dtype in [torch.float16, torch.bfloat16]:
ret = ret.to(torch.float16)
else:
ret = ret.to(torch.float32)
return ret
return _nanmedian(copied_input, axis, keepdims)
else:
if axis is None:
input = input.flatten()
ret = torch.nanquantile(
input.double(), 0.5, dim=-1, keepdim=keepdims, interpolation="midpoint"
)
if input.dtype in [torch.int32, torch.int64, torch.float64]:
ret = ret.to(torch.float64)
elif input.dtype in [torch.float16, torch.bfloat16]:
ret = ret.to(torch.float16)
else:
ret = ret.to(torch.float32)
return ret
return _nanmedian(input, axis, keepdims)
nanmedian.support_native_out = True
def bincount(
x: torch.Tensor,
/,
*,
weights: Optional[torch.Tensor] = None,
minlength: int = 0,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if weights is None:
ret = torch.bincount(x, minlength=minlength)
ret = ret.to(x.dtype)
else:
ret = torch.bincount(x, weights=weights, minlength=minlength)
ret = ret.to(weights.dtype)
return ret
bincount.support_native_out = False
def igamma(
a: torch.Tensor,
/,
*,
x: torch.Tensor,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.special.gammainc(a, x, out=out)
igamma.support_native_out = True
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, backend_version)
def cov(
x1: torch.Tensor,
x2: torch.Tensor = None,
/,
*,
rowVar: bool = True,
bias: bool = False,
ddof: Optional[int] = None,
fweights: Optional[torch.Tensor] = None,
aweights: Optional[torch.Tensor] = None,
dtype: Optional[torch.dtype] = None,
) -> torch.Tensor:
    # cast the weights separately: fweights to int64, aweights to float64
if fweights is not None:
fweights = fweights.type(torch.int64)
if aweights is not None:
aweights = aweights.type(torch.float64)
if x1.dim() > 2:
raise ValueError("x1 has more than 2 dimensions")
if x2 is not None:
if x2.dim() > 2:
raise ValueError("x2 has more than 2 dimensions")
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
if dtype is None:
x1 = x1.type(torch.float64)
if x2 is not None:
x2 = x2.type(torch.float64)
else:
x1 = x1.type(dtype)
if x2 is not None:
x2 = x2.type(dtype)
X = x1
if not rowVar and len(x1.shape) != 1:
X = torch.t(x1)
if x2 is not None:
if not rowVar and len(x2.shape) != 1:
x2 = torch.t(x2)
X = torch.vstack((X, x2))
return torch.cov(X, correction=ddof, fweights=fweights, aweights=aweights)
cov.support_native_out = False
@with_unsupported_dtypes(
{"2.2 and below": ("float16", "complex")},
backend_version,
)
def cummax(
x: torch.Tensor,
/,
*,
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
dtype: Optional[torch.dtype] = None,
out: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
if exclusive or reverse:
if exclusive and reverse:
x1, x2 = torch.cummax(torch.flip(x, dims=(axis,)), axis)
x1, x2 = torch.transpose(x1, axis, -1), torch.transpose(x2, axis, -1)
x1, x2 = torch.concat(
(torch.zeros_like(x1[..., -1:]), x1[..., :-1]), -1
), torch.concat((torch.zeros_like(x2[..., -1:]), x2[..., :-1]), -1)
x1, x2 = torch.transpose(x1, axis, -1), torch.transpose(x2, axis, -1)
res1, res2 = torch.flip(x1, dims=(axis,)), torch.flip(x2, dims=(axis,))
elif exclusive:
x = torch.transpose(x, axis, -1)
x = torch.cat((torch.zeros_like(x[..., -1:]), x[..., :-1]), -1)
x1, x2 = torch.cummax(x, -1)
res1, res2 = torch.transpose(x1, axis, -1), torch.transpose(x2, axis, -1)
else:
x1, x2 = torch.cummax(torch.flip(x, dims=(axis,)), axis)
res1, res2 = torch.flip(x1, dims=(axis,)), torch.flip(x2, dims=(axis,))
return res1, res2
return torch.cummax(x, axis, out=out)
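# Behaviour sketch (hypothetical values): `exclusive` shifts the scan right by
# one position (prepending a zero), `reverse` scans from the end backwards.
#
#     x = torch.tensor([1, 3, 2])
#     cummax(x)[0]                   # -> tensor([1, 3, 3])
#     cummax(x, exclusive=True)[0]   # -> tensor([0, 1, 3])
#     cummax(x, reverse=True)[0]     # -> tensor([3, 3, 2])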
@with_unsupported_dtypes(
{
"2.2 and below": ("uint8", "float16", "bfloat16"),
"1.12.1 and above": ("uint8", "float16"),
},
backend_version,
)
def cummin(
x: torch.Tensor,
/,
*,
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
dtype: Optional[torch.dtype] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
dtype = ivy.as_native_dtype(dtype)
if dtype is None:
dtype = _infer_dtype(x.dtype)
if not reverse:
ret = torch.cummin(x, axis)[0]
else:
ret = torch.cummin(torch.flip(x, dims=(axis,)), axis)[0]
ret = torch.flip(ret, (axis,))
if ivy.exists(out):
return ivy.inplace_update(out, ret.to(dtype))
return ret.to(dtype)
| ivy/ivy/functional/backends/torch/experimental/statistical.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/experimental/statistical.py",
"repo_id": "ivy",
"token_count": 9520
} | 26 |
import torch
import torchvision
from ivy.func_wrapper import to_native_arrays_and_back
@to_native_arrays_and_back
def roi_align(
input, boxes, output_size, spatial_scale=1.0, sampling_ratio=-1, aligned=False
):
ret = torchvision.ops.roi_align(
input, boxes, output_size, spatial_scale, sampling_ratio, aligned
)
return ret
def nms(
boxes,
scores=None,
iou_threshold=0.5,
max_output_size=None,
score_threshold=float("-inf"),
):
# boxes (Tensor[N, 4])) – boxes to perform NMS on.
# They are expected to be in (x1, y1, x2, y2) format
# with 0 <= x1 < x2 and 0 <= y1 < y2.
change_id = False
    if score_threshold != float("-inf") and scores is not None:
keep_idx = scores > score_threshold
boxes = boxes[keep_idx]
scores = scores[keep_idx]
change_id = True
nonzero = torch.nonzero(keep_idx).flatten()
if scores is None:
scores = torch.ones((boxes.shape[0],), dtype=boxes.dtype)
if len(boxes) < 2:
if len(boxes) == 1:
ret = torch.tensor([0], dtype=torch.int64)
else:
ret = torch.tensor([], dtype=torch.int64)
else:
ret = torchvision.ops.nms(boxes, scores, iou_threshold)
if change_id and len(ret) > 0:
        ret = nonzero[ret].flatten().to(torch.int64)
return ret.flatten()[:max_output_size]
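# Usage sketch (hypothetical boxes in (x1, y1, x2, y2) format):
#
#     boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
#                           [1.0, 1.0, 11.0, 11.0],
#                           [50.0, 50.0, 60.0, 60.0]])
#     scores = torch.tensor([0.9, 0.8, 0.7])
#     nms(boxes, scores, iou_threshold=0.5)  # -> tensor([0, 2]); box 1 overlaps box 0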
| ivy/ivy/functional/backends/torch/sub_backends/torchvision/layers.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/sub_backends/torchvision/layers.py",
"repo_id": "ivy",
"token_count": 619
} | 27 |
from . import control_flow_operators
from .control_flow_operators import *
from . import custom_gradient_operators
from .custom_gradient_operators import *
from . import linalg
from . import operators
from .operators import *
from . import parallel_operators
from .parallel_operators import *
| ivy/ivy/functional/frontends/jax/lax/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/lax/__init__.py",
"repo_id": "ivy",
"token_count": 79
} | 28 |
# local
import ivy
from ivy.functional.frontends.jax.func_wrapper import (
to_ivy_arrays_and_back,
)
from ivy.func_wrapper import with_unsupported_dtypes
from ivy.functional.frontends.jax.numpy import promote_types_of_jax_inputs
from ivy.functional.frontends.numpy.manipulation_routines import trim_zeros
from ivy.utils.einsum_path_helpers import (
parse_einsum_input,
compute_size_by_dict,
flop_count,
greedy_path,
optimal_path,
find_contraction,
can_dot,
)
@to_ivy_arrays_and_back
def absolute(x, /):
return ivy.abs(x)
@to_ivy_arrays_and_back
def add(x1, x2, /):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
return ivy.add(x1, x2)
@to_ivy_arrays_and_back
def angle(z, deg=False):
return ivy.angle(z, deg=deg)
@to_ivy_arrays_and_back
def arccos(x, /):
return ivy.acos(x)
@to_ivy_arrays_and_back
def arccosh(x, /):
return ivy.acosh(x)
@to_ivy_arrays_and_back
def arcsin(x, /):
return ivy.asin(x)
@to_ivy_arrays_and_back
def arcsinh(x, /):
return ivy.asinh(x)
@to_ivy_arrays_and_back
def arctan(x, /):
return ivy.atan(x)
@to_ivy_arrays_and_back
def arctan2(x1, x2, /):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
return ivy.atan2(x1, x2)
@to_ivy_arrays_and_back
def arctanh(x, /):
return ivy.atanh(x)
@to_ivy_arrays_and_back
def around(a, decimals=0, out=None):
ret_dtype = a.dtype
return ivy.round(a, decimals=decimals, out=out).astype(ret_dtype, copy=False)
@with_unsupported_dtypes(
{"0.4.24 and below": ("bfloat16",)},
"jax",
)
@to_ivy_arrays_and_back
def cbrt(x, /):
all_positive = ivy.pow(ivy.abs(x), 1.0 / 3.0)
return ivy.where(ivy.less(x, 0.0), ivy.negative(all_positive), all_positive)
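# Sketch (hypothetical values): a sign-preserving cube root.
#
#     cbrt(ivy.array([8.0, -27.0]))  # -> ivy.array([ 2., -3.])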
@to_ivy_arrays_and_back
def ceil(x, /):
return ivy.ceil(x)
@with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle")
@to_ivy_arrays_and_back
def clip(a, a_min=None, a_max=None, out=None):
return ivy.array(ivy.clip(a, a_min, a_max), dtype=a.dtype)
@to_ivy_arrays_and_back
def conj(x, /):
return ivy.conj(x)
@to_ivy_arrays_and_back
def conjugate(x, /):
return ivy.conj(x)
@to_ivy_arrays_and_back
def convolve(a, v, mode="full", *, precision=None):
a, v = promote_types_of_jax_inputs(a, v)
if len(a) < len(v):
a, v = v, a
v = ivy.flip(v)
out_order = slice(None)
if mode == "valid":
padding = [(0, 0)]
elif mode == "same":
padding = [(v.shape[0] // 2, v.shape[0] - v.shape[0] // 2 - 1)]
elif mode == "full":
padding = [(v.shape[0] - 1, v.shape[0] - 1)]
a = a.reshape([1, 1, a.shape[0]])
v = v.reshape([v.shape[0], 1, 1])
result = ivy.conv_general_dilated(
a,
v,
(1,),
padding,
dims=1,
data_format="channel_first",
)
return result[0, 0, out_order]
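# Behaviour sketch (hypothetical values), mirroring numpy.convolve's modes:
#
#     a = ivy.array([1.0, 2.0, 3.0])
#     v = ivy.array([0.0, 1.0, 0.5])
#     convolve(a, v)                # "full"  -> [0. , 1. , 2.5, 4. , 1.5]
#     convolve(a, v, mode="same")   # -> [1. , 2.5, 4. ]
#     convolve(a, v, mode="valid")  # -> [2.5]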
@to_ivy_arrays_and_back
def copysign(x1, x2, /):
return ivy.copysign(x1, x2)
@to_ivy_arrays_and_back
def cos(x, /):
return ivy.cos(x)
@to_ivy_arrays_and_back
def cosh(x, /):
return ivy.cosh(x)
@to_ivy_arrays_and_back
def deg2rad(x, /):
return ivy.deg2rad(x)
@to_ivy_arrays_and_back
def degrees(x, /):
return ivy.rad2deg(x)
@to_ivy_arrays_and_back
def diff(a, n=1, axis=-1, prepend=None, append=None):
return ivy.diff(a, n=n, axis=axis, prepend=prepend, append=append, out=None)
@to_ivy_arrays_and_back
def divide(x1, x2, /):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
if ivy.dtype(x1) in ["int64", "uint64"]:
x1 = ivy.astype(x1, ivy.float64)
elif ivy.is_int_dtype(x1):
x1 = ivy.astype(x1, ivy.float32)
return ivy.divide(x1, x2).astype(x1.dtype)
@to_ivy_arrays_and_back
def divmod(x1, x2, /):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
return (ivy.floor_divide(x1, x2), ivy.remainder(x1, x2))
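# Sketch (hypothetical values): pairs floor division with the remainder, so
# quotient * x2 + remainder == x1 elementwise.
#
#     divmod(ivy.array([7, -7]), ivy.array([3, 3]))
#     # -> (ivy.array([ 2, -3]), ivy.array([1, 2]))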
@to_ivy_arrays_and_back
def dot(a, b, *, precision=None):
a, b = promote_types_of_jax_inputs(a, b)
return ivy.matmul(a, b)
@to_ivy_arrays_and_back
def ediff1d(ary, to_end=None, to_begin=None):
diffs = ivy.diff(ary)
diffs_dtype = diffs.dtype
if to_begin is not None:
if not isinstance(to_begin, (list, tuple)):
to_begin = [to_begin]
to_begin = ivy.array(to_begin, dtype=diffs_dtype)
diffs = ivy.concat((to_begin, diffs))
if to_end is not None:
if not isinstance(to_end, (list, tuple)):
to_end = [to_end]
to_end = ivy.array(to_end, dtype=diffs_dtype)
diffs = ivy.concat((diffs, to_end))
return diffs
@to_ivy_arrays_and_back
def einsum_path(subscripts, *operands, optimize="greedy"):
# Figure out what the path really is
path_type = optimize
if path_type is True:
path_type = "greedy"
if path_type is None:
path_type = False
explicit_einsum_path = False
memory_limit = None
# No optimization or a named path algorithm
if (path_type is False) or isinstance(path_type, str):
pass
# Given an explicit path
elif len(path_type) and (path_type[0] == "einsum_path"):
explicit_einsum_path = True
# Path tuple with memory limit
elif (
(len(path_type) == 2)
and isinstance(path_type[0], str)
and isinstance(path_type[1], (int, float))
):
memory_limit = int(path_type[1])
path_type = path_type[0]
else:
raise TypeError(f"Did not understand the path: {str(path_type)}")
# Python side parsing
if subscripts:
input_subscripts, output_subscript, operands = parse_einsum_input(
operands, subscripts=subscripts
)
else:
input_subscripts, output_subscript, operands = parse_einsum_input(operands)
# Build a few useful list and sets
input_list = input_subscripts.split(",")
input_sets = [set(x) for x in input_list]
output_set = set(output_subscript)
indices = set(input_subscripts.replace(",", ""))
# Get length of each unique dimension and ensure all dimensions are correct
dimension_dict = {}
    broadcast_indices = [[] for _ in range(len(input_list))]
for tnum, term in enumerate(input_list):
sh = operands[tnum].shape
if len(sh) != len(term):
raise ValueError(
"Einstein sum subscript %s does not contain the "
"correct number of indices for operand %d."
                % (term, tnum)
)
for cnum, char in enumerate(term):
dim = sh[cnum]
# Build out broadcast indices
if dim == 1:
broadcast_indices[tnum].append(char)
            if char in dimension_dict:
# For broadcasting cases we always want the largest dim size
if dimension_dict[char] == 1:
dimension_dict[char] = dim
elif dim not in (1, dimension_dict[char]):
raise ValueError(
"Size of label '%s' for operand %d (%d) "
"does not match previous terms (%d)."
                        % (char, tnum, dim, dimension_dict[char])
)
else:
dimension_dict[char] = dim
# Convert broadcast inds to sets
broadcast_indices = [set(x) for x in broadcast_indices]
# Compute size of each input array plus the output array
size_list = [
compute_size_by_dict(term, dimension_dict)
for term in input_list + [output_subscript]
]
max_size = max(size_list)
if memory_limit is None:
memory_arg = max_size
else:
memory_arg = memory_limit
# Compute naive cost
# This isn't quite right, need to look into exactly how einsum does this
inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
naive_cost = flop_count(indices, inner_product, len(input_list), dimension_dict)
# Compute the path
if explicit_einsum_path:
path = path_type[1:]
elif (path_type is False) or (len(input_list) in [1, 2]) or (indices == output_set):
# Nothing to be optimized, leave it to einsum
path = [tuple(range(len(input_list)))]
elif path_type == "greedy":
path = greedy_path(input_sets, output_set, dimension_dict, memory_arg)
elif path_type == "optimal":
path = optimal_path(input_sets, output_set, dimension_dict, memory_arg)
else:
raise KeyError(f"Path name {path_type} not found")
cost_list, scale_list, size_list, contraction_list = [], [], [], []
# Build contraction tuple (positions, gemm, einsum_str, remaining)
for cnum, contract_inds in enumerate(path):
# Make sure we remove inds from right to left
contract_inds = tuple(sorted(contract_inds, reverse=True))
contract = find_contraction(contract_inds, input_sets, output_set)
out_inds, input_sets, idx_removed, idx_contract = contract
cost = flop_count(idx_contract, idx_removed, len(contract_inds), dimension_dict)
cost_list.append(cost)
scale_list.append(len(idx_contract))
size_list.append(compute_size_by_dict(out_inds, dimension_dict))
bcast = set()
tmp_inputs = []
for x in contract_inds:
tmp_inputs.append(input_list.pop(x))
bcast |= broadcast_indices.pop(x)
new_bcast_inds = bcast - idx_removed
# If we're broadcasting, nix blas
if not len(idx_removed & bcast):
do_blas = can_dot(tmp_inputs, out_inds, idx_removed)
else:
do_blas = False
# Last contraction
if (cnum - len(path)) == -1:
idx_result = output_subscript
else:
sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
idx_result = "".join([x[1] for x in sorted(sort_result)])
input_list.append(idx_result)
broadcast_indices.append(new_bcast_inds)
einsum_str = ",".join(tmp_inputs) + "->" + idx_result
contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas)
contraction_list.append(contraction)
opt_cost = sum(cost_list) + 1
if len(input_list) != 1:
# Explicit "einsum_path" is usually trusted, but we detect this kind of
        # mistake in order to prevent returning an intermediate value.
raise RuntimeError(
f"Invalid einsum_path is specified: {len(input_list) - 1} "
"more operands has to be contracted."
)
# Return the path along with a nice string representation
overall_contraction = input_subscripts + "->" + output_subscript
header = ("scaling", "current", "remaining")
speedup = naive_cost / opt_cost
max_i = max(size_list)
path_print = f" Complete contraction: {overall_contraction}\n"
path_print += f" Naive scaling: {len(indices)}\n"
path_print += f" Optimized scaling: {max(scale_list)}\n"
path_print += f" Naive FLOP count: {naive_cost:.3e}\n"
path_print += f" Optimized FLOP count: {opt_cost:.3e}\n"
path_print += f" Theoretical speedup: {speedup:3.3f}\n"
path_print += f" Largest intermediate: {max_i:.3e} elements\n"
path_print += "-" * 74 + "\n"
path_print += "%6s %24s %40s\n" % header
path_print += "-" * 74
for n, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining, blas = contraction
remaining_str = ",".join(remaining) + "->" + output_subscript
path_run = (scale_list[n], einsum_str, remaining_str)
path_print += "\n%4d %24s %40s" % path_run
ret = (path, path_print)
return ret
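# Usage sketch (hypothetical shapes): the first element of the returned tuple
# is the contraction order, the second a human-readable report.
#
#     a, b, c = ivy.ones((8, 4)), ivy.ones((4, 16)), ivy.ones((16, 2))
#     path, report = einsum_path("ij,jk,kl->il", a, b, c, optimize="greedy")
#     # path is a list of pairwise contractions, e.g. [(1, 2), (0, 1)]
#     print(report)  # scaling / FLOP-count summary table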
@to_ivy_arrays_and_back
def exp(
x,
/,
):
return ivy.exp(x)
@to_ivy_arrays_and_back
def exp2(x, /):
return ivy.exp2(x)
@to_ivy_arrays_and_back
def expm1(
x,
/,
):
return ivy.expm1(x)
@with_unsupported_dtypes(
{"0.4.24 and below": ("uint16",)},
"jax",
)
@to_ivy_arrays_and_back
def fabs(x, /):
return ivy.abs(x)
@to_ivy_arrays_and_back
def fix(x, out=None):
return ivy.fix(x, out=out)
@to_ivy_arrays_and_back
def float_power(x1, x2, /):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
return ivy.float_power(x1, x2).astype(x1.dtype, copy=False)
@to_ivy_arrays_and_back
def floor(x, /):
return ivy.floor(x)
@to_ivy_arrays_and_back
def floor_divide(x1, x2, /, out=None):
return ivy.floor_divide(x1, x2, out=out)
@to_ivy_arrays_and_back
def fmax(x1, x2):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
return ivy.fmax(x1, x2)
@to_ivy_arrays_and_back
def fmin(x1, x2):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
return ivy.fmin(x1, x2)
@to_ivy_arrays_and_back
def fmod(x1, x2, /):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
return ivy.fmod(x1, x2)
@to_ivy_arrays_and_back
def frexp(x, /):
return ivy.frexp(x)
@to_ivy_arrays_and_back
def gcd(x1, x2):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
return ivy.gcd(x1, x2)
@to_ivy_arrays_and_back
def gradient(f, *varargs, axis=None, edge_order=None):
    edge_order = edge_order if edge_order is not None else 1
    spacing = varargs if varargs else 1
    return ivy.gradient(f, spacing=spacing, axis=axis, edge_order=edge_order)
@to_ivy_arrays_and_back
def heaviside(x1, x2, /):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
return ivy.heaviside(x1, x2)
@to_ivy_arrays_and_back
def hypot(x1, x2, /):
return ivy.hypot(x1, x2)
@to_ivy_arrays_and_back
def i0(x):
return ivy.i0(x)
@to_ivy_arrays_and_back
def imag(val, /):
return ivy.imag(val)
@to_ivy_arrays_and_back
def inner(a, b):
a, b = promote_types_of_jax_inputs(a, b)
return ivy.inner(a, b)
@to_ivy_arrays_and_back
def interp(x, xp, fp, left=None, right=None, period=None):
return ivy.interp(x, xp, fp, left=left, right=right, period=period)
@to_ivy_arrays_and_back
def kron(a, b):
a, b = promote_types_of_jax_inputs(a, b)
return ivy.kron(a, b)
@to_ivy_arrays_and_back
def lcm(x1, x2):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
return ivy.lcm(x1, x2)
@to_ivy_arrays_and_back
def ldexp(x1, x2, /):
return ivy.ldexp(x1, x2)
@to_ivy_arrays_and_back
def log(x, /):
return ivy.log(x)
@to_ivy_arrays_and_back
def log10(x, /):
return ivy.log10(x)
@to_ivy_arrays_and_back
def log1p(x, /):
return ivy.log1p(x)
@to_ivy_arrays_and_back
def log2(x, /):
return ivy.log2(x)
@to_ivy_arrays_and_back
def logaddexp(x1, x2, /):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
return ivy.logaddexp(x1, x2)
@to_ivy_arrays_and_back
def logaddexp2(x1, x2, /):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
return ivy.logaddexp2(x1, x2)
@to_ivy_arrays_and_back
def matmul(a, b, *, precision=None):
a, b = promote_types_of_jax_inputs(a, b)
return ivy.matmul(a, b)
@to_ivy_arrays_and_back
def maximum(x1, x2, /):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
return ivy.maximum(x1, x2)
@to_ivy_arrays_and_back
def minimum(x1, x2, /):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
return ivy.minimum(x1, x2)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"0.4.24 and below": ("complex",)}, "jax")
def mod(x1, x2, /):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
return ivy.remainder(x1, x2)
@to_ivy_arrays_and_back
def modf(x, /, out=None):
y1 = ivy.where(x >= 0, ivy.floor(x), ivy.ceil(x)) # integral part
y2 = x - y1 # fractional part
dtype_str = str(x.dtype)
if "float" in dtype_str:
return y2, y1
# floats return as they were. u/ints (8, 16, 32) return as float32, 64 as float64.
dtype_size = x.itemsize * 8
if "int8" in dtype_str or "int16" in dtype_str:
dtype_size = 32
ret_type = f"float{dtype_size}"
return y2.astype(ret_type), y1.astype(ret_type)
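# Sketch (hypothetical values): the fractional part is returned first and keeps
# the sign of the input, matching numpy.modf.
#
#     modf(ivy.array([2.5, -1.25]))
#     # -> (ivy.array([ 0.5 , -0.25]), ivy.array([ 2., -1.]))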
@to_ivy_arrays_and_back
def multiply(x1, x2, /):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
return ivy.multiply(x1, x2)
@to_ivy_arrays_and_back
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
return ivy.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf)
@to_ivy_arrays_and_back
def negative(
x,
/,
):
return ivy.negative(x)
@with_unsupported_dtypes(
{
"0.4.24 and below": (
"bfloat16",
"float16",
)
},
"jax",
)
@to_ivy_arrays_and_back
def nextafter(x1, x2, /):
return ivy.nextafter(x1, x2)
@to_ivy_arrays_and_back
def outer(a, b, out=None):
return ivy.outer(a, b, out=out)
@to_ivy_arrays_and_back
def poly(seq_of_zeros):
seq_of_zeros = ivy.atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = ivy.eigvals(seq_of_zeros)
if seq_of_zeros.ndim != 1:
raise ValueError("input must be 1d or non-empty square 2d array.")
dt = seq_of_zeros.dtype
if len(seq_of_zeros) == 0:
return ivy.ones((), dtype=dt)
a = ivy.ones((1,), dtype=dt)
for k in range(len(seq_of_zeros)):
a = convolve(
a, ivy.asarray([ivy.array(1), -seq_of_zeros[k]], dtype=dt), mode="full"
)
return a
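# Sketch (hypothetical roots): builds monic polynomial coefficients from roots,
# e.g. roots 1 and 2 give x**2 - 3*x + 2.
#
#     poly(ivy.array([1.0, 2.0]))  # -> ivy.array([ 1., -3.,  2.])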
@to_ivy_arrays_and_back
def polyadd(a1, a2):
d = max(a1.size, a2.size)
a1 = ivy.pad(a1, (d - a1.size, 0), mode="constant")
a2 = ivy.pad(a2, (d - a2.size, 0), mode="constant")
return a1 + a2
@with_unsupported_dtypes(
{"0.4.24 and below": ("float16",)},
"jax",
)
@to_ivy_arrays_and_back
def polyder(p, m=1):
if m < 0:
raise ValueError("Order of derivative must be positive.")
if m == 0:
return p
p_dtype = p.dtype
coeff = ivy.prod(
ivy.expand_dims(ivy.arange(m, len(p), dtype=p_dtype))
- ivy.expand_dims(ivy.arange(m, dtype=p_dtype), axis=1),
axis=0,
)
return (p[:-m] * coeff[::-1]).astype(p_dtype)
@with_unsupported_dtypes(
{"0.3.14 and below": ("float16",)},
"jax",
)
@to_ivy_arrays_and_back
def polydiv(u, v, *, trim_leading_zeros=False):
u, v_arr = ivy.promote_types_of_inputs(u, v)
n = v_arr.shape[0] - 1
m = u.shape[0] - 1
scale = 1.0 / v_arr[0]
q = ivy.zeros((max(m - n + 1, 1),), dtype=u.dtype)
r = ivy.copy_array(u)
for k in range(0, m - n + 1):
d = scale * r[k]
q[k] = d
r[k : k + n + 1] = r[k : k + n + 1] - (d * v_arr)
# if trim_leading_zeros:
# r = trim_zeros_tol(r, trim='f')
# TODO: need to control tolerance of this function to handle the argument
return q, r
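# Sketch (hypothetical coefficients): dividing x**2 + 3*x + 2 by x + 1 gives
# the quotient x + 2 with zero remainder.
#
#     q, r = polydiv(ivy.array([1.0, 3.0, 2.0]), ivy.array([1.0, 1.0]))
#     # q -> ivy.array([1., 2.]), r -> ivy.array([0., 0., 0.]) (r is untrimmed)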
@with_unsupported_dtypes(
{"0.4.24 and below": ("float16",)},
"jax",
)
@to_ivy_arrays_and_back
def polyint(p, m=1, k=None):
p = ivy.asarray(p)
m = int(m)
if m == 0:
return p
if k is None:
k_arr = ivy.zeros((m,), dtype=p.dtype)
elif isinstance(k, (int, float)):
k_arr = ivy.full((m,), k, dtype=p.dtype)
elif ivy.asarray(k).shape == (1,):
k_arr = ivy.full((m,), ivy.asarray(k)[0], dtype=p.dtype)
elif ivy.asarray(k).shape == (m,):
k_arr = ivy.asarray(k, dtype=p.dtype)
else:
raise ValueError("k must be a scalar or a rank-1 array of length 1 or m.")
grid = (
ivy.arange(p.size + m, dtype=p.dtype)[ivy.newaxis]
- ivy.arange(m, dtype=p.dtype)[:, ivy.newaxis]
)
coeff = ivy.maximum(1, grid).prod(axis=0)[::-1]
return ivy.divide(ivy.concat((p, k_arr)), coeff).astype(p.dtype)
@to_ivy_arrays_and_back
def polymul(a1, a2, *, trim_leading_zeros=False):
a1, a2 = ivy.atleast_1d(a1), ivy.atleast_1d(a2)
if trim_leading_zeros and (len(a1) > 1 or len(a2) > 1):
a1, a2 = trim_zeros(a1, trim="f"), trim_zeros(a2, trim="f")
if len(a1) == 0:
a1 = ivy.asarray([0], dtype=a1.dtype)
if len(a2) == 0:
a2 = ivy.asarray([0], dtype=a2.dtype)
return convolve(a1, a2, mode="full")
@to_ivy_arrays_and_back
def polysub(a1, a2):
n = max(a1.size, a2.size) - 1
a1 = ivy.pad(a1, (0, n - a1.size + 1), mode="constant")
a2 = ivy.pad(a2, (0, n - a2.size + 1), mode="constant")
return a1 - a2
@to_ivy_arrays_and_back
def positive(
x,
/,
):
return ivy.positive(x)
@to_ivy_arrays_and_back
def power(x1, x2, /):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
return ivy.pow(x1, x2)
@to_ivy_arrays_and_back
def product(
a,
*,
axis=None,
dtype=None,
keepdims=False,
initial=None,
where=None,
promote_integers=True,
out=None,
):
if ivy.is_array(where):
a = ivy.where(where, a, ivy.default(out, ivy.ones_like(a)), out=out)
if promote_integers:
if ivy.is_uint_dtype(a.dtype):
dtype = "uint64"
elif ivy.is_int_dtype(a.dtype):
dtype = "int64"
if initial is not None:
if axis is not None:
s = ivy.to_list(ivy.shape(a, as_array=True))
s[axis] = 1
header = ivy.full(ivy.Shape(tuple(s)), initial)
a = ivy.concat([header, a], axis=axis)
else:
a[0] *= initial
return ivy.prod(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
@to_ivy_arrays_and_back
def rad2deg(
x,
/,
):
return ivy.rad2deg(x)
@to_ivy_arrays_and_back
def radians(x, /):
return ivy.deg2rad(x)
@to_ivy_arrays_and_back
def real(val, /):
return ivy.real(val)
@to_ivy_arrays_and_back
def reciprocal(x, /):
return ivy.reciprocal(x)
@to_ivy_arrays_and_back
def remainder(x1, x2, /):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
return ivy.remainder(x1, x2)
@to_ivy_arrays_and_back
def round(a, decimals=0, out=None):
return ivy.round(a, decimals=decimals, out=out)
# sign
@to_ivy_arrays_and_back
def sign(x, /):
return ivy.sign(x, out=None)
@to_ivy_arrays_and_back
def signbit(x, /):
x = ivy.array(x)
return ivy.signbit(x)
@to_ivy_arrays_and_back
def sin(x, /):
return ivy.sin(x)
@to_ivy_arrays_and_back
def sinc(x, /):
return ivy.sinc(x)
@to_ivy_arrays_and_back
def sinh(x, /):
return ivy.sinh(x)
@to_ivy_arrays_and_back
def sqrt(x, /):
return ivy.sqrt(x)
@to_ivy_arrays_and_back
def square(x, /):
return ivy.square(x)
@to_ivy_arrays_and_back
def subtract(x1, x2, /):
x1, x2 = promote_types_of_jax_inputs(x1, x2)
return ivy.subtract(x1, x2)
@to_ivy_arrays_and_back
def tan(x, /):
return ivy.tan(x)
@to_ivy_arrays_and_back
def tanh(x, /):
return ivy.tanh(x)
@to_ivy_arrays_and_back
def tensordot(a, b, axes=2):
a, b = promote_types_of_jax_inputs(a, b)
return ivy.tensordot(a, b, axes=axes)
@to_ivy_arrays_and_back
def trace(a, offset=0, axis1=0, axis2=1, out=None):
return ivy.trace(a, offset=offset, axis1=axis1, axis2=axis2, out=out)
@to_ivy_arrays_and_back
def trapz(y, x=None, dx=1.0, axis=-1, out=None):
return ivy.trapz(y, x=x, dx=dx, axis=axis, out=out)
@to_ivy_arrays_and_back
def trunc(x):
return ivy.trunc(x)
@to_ivy_arrays_and_back
def vdot(a, b):
a, b = promote_types_of_jax_inputs(a, b)
return ivy.multiply(a, b).sum()
abs = absolute
true_divide = divide
| ivy/ivy/functional/frontends/jax/numpy/mathematical_functions.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/numpy/mathematical_functions.py",
"repo_id": "ivy",
"token_count": 11417
} | 29 |
import ivy
from ivy.utils.exceptions import handle_exceptions
from numbers import Number
from typing import Union, Tuple, Iterable
# Constructing the dtypes here is required, as ivy.<dtype>
# changes dynamically with the backend and may not be available
_int8 = ivy.IntDtype("int8")
_int16 = ivy.IntDtype("int16")
_int32 = ivy.IntDtype("int32")
_int64 = ivy.IntDtype("int64")
_uint8 = ivy.UintDtype("uint8")
_uint16 = ivy.UintDtype("uint16")
_uint32 = ivy.UintDtype("uint32")
_uint64 = ivy.UintDtype("uint64")
_bfloat16 = ivy.FloatDtype("bfloat16")
_float16 = ivy.FloatDtype("float16")
_float32 = ivy.FloatDtype("float32")
_float64 = ivy.FloatDtype("float64")
_complex64 = ivy.ComplexDtype("complex64")
_complex128 = ivy.ComplexDtype("complex128")
_bool = ivy.Dtype("bool")
mxnet_promotion_table = {
(_bool, _bool): _bool,
(_bool, _int8): _int8,
(_bool, _int32): _int32,
(_bool, _int64): _int64,
(_bool, _uint8): _uint8,
(_bool, _bfloat16): _bfloat16,
(_bool, _float16): _float16,
(_bool, _float32): _float32,
(_bool, _float64): _float64,
(_int8, _bool): _int8,
(_int8, _int8): _int8,
(_int8, _int32): _int32,
(_int8, _int64): _int64,
(_int32, _bool): _int32,
(_int32, _int8): _int32,
(_int32, _int32): _int32,
(_int32, _int64): _int64,
(_int64, _bool): _int64,
(_int64, _int8): _int64,
(_int64, _int32): _int64,
(_int64, _int64): _int64,
(_uint8, _bool): _uint8,
(_uint8, _uint8): _uint8,
(_int32, _uint8): _int32,
(_int64, _uint8): _int64,
(_uint8, _int32): _int32,
(_uint8, _int64): _int64,
(_float16, _bool): _float16,
(_float16, _float16): _float16,
(_float16, _float32): _float32,
(_float16, _float64): _float64,
(_float32, _bool): _float32,
(_float32, _float16): _float32,
(_float32, _float32): _float32,
(_float32, _float64): _float64,
(_float64, _bool): _float64,
(_float64, _float16): _float64,
(_float64, _float32): _float64,
(_float64, _float64): _float64,
(_int8, _float16): _float16,
(_float16, _int8): _float16,
(_int8, _float32): _float32,
(_float32, _int8): _float32,
(_int8, _float64): _float64,
(_float64, _int8): _float64,
(_int32, _float16): _float64,
(_float16, _int32): _float64,
(_int32, _float32): _float64,
(_float32, _int32): _float64,
(_int32, _float64): _float64,
(_float64, _int32): _float64,
(_int64, _float16): _float64,
(_float16, _int64): _float64,
(_int64, _float32): _float64,
(_float32, _int64): _float64,
(_int64, _float64): _float64,
(_float64, _int64): _float64,
(_uint8, _float16): _float16,
(_float16, _uint8): _float16,
(_uint8, _float32): _float32,
(_float32, _uint8): _float32,
(_uint8, _float64): _float64,
(_float64, _uint8): _float64,
(_bfloat16, _bfloat16): _bfloat16,
(_bfloat16, _uint8): _bfloat16,
(_uint8, _bfloat16): _bfloat16,
(_bfloat16, _int8): _bfloat16,
(_int8, _bfloat16): _bfloat16,
(_bfloat16, _float32): _float32,
(_float32, _bfloat16): _float32,
(_bfloat16, _float64): _float64,
(_float64, _bfloat16): _float64,
}
@handle_exceptions
def promote_types_mxnet(
type1: Union[ivy.Dtype, ivy.NativeDtype],
type2: Union[ivy.Dtype, ivy.NativeDtype],
/,
) -> ivy.Dtype:
"""Promote the datatypes type1 and type2, returning the data type they
promote to.
Parameters
----------
type1
the first of the two types to promote
type2
the second of the two types to promote
Returns
-------
ret
The type that both input types promote to
"""
try:
ret = mxnet_promotion_table[(ivy.as_ivy_dtype(type1), ivy.as_ivy_dtype(type2))]
except KeyError as e:
raise ivy.utils.exceptions.IvyException(
"these dtypes are not type promotable"
) from e
return ret
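# Usage sketch:
#
#     promote_types_mxnet("int8", "float32")    # -> float32
#     promote_types_mxnet("uint8", "bfloat16")  # -> bfloat16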
@handle_exceptions
def promote_types_of_mxnet_inputs(
x1: Union[ivy.Array, Number, Iterable[Number]],
x2: Union[ivy.Array, Number, Iterable[Number]],
/,
) -> Tuple[ivy.Array, ivy.Array]:
"""Promote the dtype of the given native array inputs to a common dtype
based on type promotion rules.
While passing float or integer values or any other non-array input
to this function, it should be noted that the return will be an
array-like object. Therefore, outputs from this function should be
used as inputs only for those functions that expect an array-like or
tensor-like objects, otherwise it might give unexpected results.
"""
type1 = ivy.default_dtype(item=x1).strip("u123456789")
type2 = ivy.default_dtype(item=x2).strip("u123456789")
if hasattr(x1, "dtype") and not hasattr(x2, "dtype") and type1 == type2:
x1 = ivy.asarray(x1)
x2 = ivy.asarray(
x2, dtype=x1.dtype, device=ivy.default_device(item=x1, as_native=False)
)
elif not hasattr(x1, "dtype") and hasattr(x2, "dtype") and type1 == type2:
x1 = ivy.asarray(
x1, dtype=x2.dtype, device=ivy.default_device(item=x2, as_native=False)
)
x2 = ivy.asarray(x2)
else:
x1 = ivy.asarray(x1)
x2 = ivy.asarray(x2)
promoted = promote_types_mxnet(x1.dtype, x2.dtype)
x1 = ivy.asarray(x1, dtype=promoted)
x2 = ivy.asarray(x2, dtype=promoted)
return x1, x2
from . import random
from . import ndarray
from . import linalg
from .linalg import *
from . import mathematical_functions
from .mathematical_functions import *
from . import creation
from .creation import *
from . import symbol
from .symbol import *
| ivy/ivy/functional/frontends/mxnet/numpy/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/mxnet/numpy/__init__.py",
"repo_id": "ivy",
"token_count": 2499
} | 30 |
from . import indexing_like_operations
from .indexing_like_operations import *
| ivy/ivy/functional/frontends/numpy/indexing_routines/lib/stride_tricks/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/indexing_routines/lib/stride_tricks/__init__.py",
"repo_id": "ivy",
"token_count": 23
} | 31 |
# local
import ivy
from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back
@to_ivy_arrays_and_back
def rollaxis(a, axis, start=0):
n = len(ivy.shape(a))
if axis < -n or axis >= n:
raise ValueError(f"axis {axis} is out of bounds for array of {n} dimensions")
if axis < 0:
axis += n
if start < 0:
start += n
msg = "'%s' arg requires %d <= %s < %d, but %d was passed in"
if not (0 <= start < n + 1):
raise ValueError(msg % ("start", -n, "start", n + 1, start))
if axis < start:
start -= 1
end = start + axis
axes = tuple(i for i in range(n) if i != axis)
axes = axes[:start] + (axis,) + axes[start:end] + axes[end:]
return ivy.permute_dims(a, axes, out=None)
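# Behaviour sketch (hypothetical shape), matching numpy.rollaxis:
#
#     a = ivy.zeros((3, 4, 5, 6))
#     rollaxis(a, 3, 1).shape  # -> (3, 6, 4, 5)
#     rollaxis(a, 2).shape     # -> (5, 3, 4, 6)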
@to_ivy_arrays_and_back
def swapaxes(a, axis1, axis2):
return ivy.swapaxes(a, axis1, axis2)
@to_ivy_arrays_and_back
def transpose(array, /, *, axes=None):
if not axes:
axes = list(range(len(array.shape)))[::-1]
if isinstance(axes, int):
axes = [axes]
if (len(array.shape) == 0 and not axes) or (len(array.shape) == 1 and axes[0] == 0):
return array
return ivy.permute_dims(array, axes, out=None)
| ivy/ivy/functional/frontends/numpy/manipulation_routines/transpose_like_operations.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/manipulation_routines/transpose_like_operations.py",
"repo_id": "ivy",
"token_count": 538
} | 32 |
from . import ndarray
from .ndarray import ndarray
| ivy/ivy/functional/frontends/numpy/ndarray/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/ndarray/__init__.py",
"repo_id": "ivy",
"token_count": 16
} | 33 |
# global
import ivy
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
handle_numpy_dtype,
)
@handle_numpy_dtype
@to_ivy_arrays_and_back
def corrcoef(x, y=None, /, *, rowvar=True, bias=None, ddof=None, dtype="float64"):
if (bias is not None) or (ddof is not None):
ivy.warn("bias and ddof are deprecated and have no effect")
x = x.astype("float64")
if y is not None:
y = y.astype("float64")
return ivy.corrcoef(x, y=y, rowvar=rowvar).astype(dtype)
@to_ivy_arrays_and_back
def correlate(a, v, mode=None, *, old_behavior=False):
dtypes = [x.dtype for x in [a, v]]
mode = mode if mode is not None else "valid"
ivy.utils.assertions.check_equal(a.ndim, 1, as_array=False)
ivy.utils.assertions.check_equal(v.ndim, 1, as_array=False)
n = min(a.shape[0], v.shape[0])
m = max(a.shape[0], v.shape[0])
if a.shape[0] >= v.shape[0]:
if mode == "full":
r = n + m - 1
            for _ in range(0, n - 1):
a = ivy.concat((ivy.array([0]), a), axis=0)
elif mode == "same":
r = m
right_pad = (n - 1) // 2
left_pad = (n - 1) - (n - 1) // 2
for _ in range(0, left_pad):
a = ivy.concat((ivy.array([0]), a), axis=0)
for _ in range(0, right_pad):
a = ivy.concat((a, ivy.array([0])), axis=0)
elif mode == "valid":
r = m - n + 1
else:
raise ivy.utils.exceptions.IvyException("invalid mode")
ret = ivy.array(
[ivy.to_list((v[:n] * ivy.roll(a, -t)[:n]).sum()) for t in range(0, r)],
dtype=max(dtypes),
)
else:
if mode == "full":
r = n + m - 1
            for _ in range(0, n - 1):
v = ivy.concat((ivy.array([0]), v), axis=0)
elif mode == "same":
r = m
right_pad = (n - 1) // 2
left_pad = (n - 1) - (n - 1) // 2
for _ in range(0, left_pad):
v = ivy.concat((ivy.array([0]), v), axis=0)
for _ in range(0, right_pad):
v = ivy.concat((v, ivy.array([0])), axis=0)
elif mode == "valid":
r = m - n + 1
else:
raise ivy.utils.exceptions.IvyException("invalid mode")
ret = ivy.flip(
ivy.array(
[ivy.to_list((a[:n] * ivy.roll(v, -t)[:n]).sum()) for t in range(0, r)],
dtype=max(dtypes),
)
)
return ret
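# Sketch (hypothetical values), mirroring numpy.correlate's default "valid" mode:
#
#     correlate(ivy.array([1.0, 2.0, 3.0]), ivy.array([0.0, 1.0, 0.5]))
#     # -> ivy.array([3.5])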
| ivy/ivy/functional/frontends/numpy/statistics/correlating.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/statistics/correlating.py",
"repo_id": "ivy",
"token_count": 1399
} | 34 |
from typing import Callable
import functools
import ivy
import ivy.functional.frontends.paddle as paddle_frontend
# --- Helpers --- #
# --------------- #
def _from_ivy_array_to_paddle_frontend_tensor(x, nested=False, include_derived=None):
if nested:
return ivy.nested_map(
_from_ivy_array_to_paddle_frontend_tensor, x, include_derived, shallow=False
)
elif isinstance(x, ivy.Array) or ivy.is_native_array(x):
a = paddle_frontend.Tensor(x)
return a
return x
def _to_ivy_array(x):
# if x is a native array return it as an ivy array
if isinstance(x, ivy.NativeArray):
return ivy.array(x)
    # else if x is a frontend Tensor (paddle's here, but any frontend "Tensor" works) return the wrapped ivy array  # noqa: E501
elif hasattr(x, "ivy_array"):
return x.ivy_array
# else just return x
return x
# --- Main --- #
# ------------ #
def inputs_to_ivy_arrays(fn: Callable) -> Callable:
@functools.wraps(fn)
def new_fn(*args, **kwargs):
"""Convert `Tensor` into `ivy.Array` instances.
Convert all `Tensor` instances in both the positional and keyword arguments
into `ivy.Array` instances, and then call the function with the updated
arguments.
"""
# convert all input arrays to ivy.Array instances
new_args = ivy.nested_map(
_to_ivy_array, args, include_derived={"tuple": True}, shallow=False
)
new_kwargs = ivy.nested_map(
_to_ivy_array, kwargs, include_derived={"tuple": True}, shallow=False
)
return fn(*new_args, **new_kwargs)
return new_fn
def outputs_to_frontend_arrays(fn: Callable) -> Callable:
@functools.wraps(fn)
def new_fn(*args, **kwargs):
"""Convert `ivy.Array` into `Tensor` instances.
Call the function, and then convert all `ivy.Array` instances returned by the
function into `Tensor` instances.
"""
# call unmodified function
# ToDo: Remove this default dtype setting
# once frontend specific backend setting is added
# ivy.set_default_int_dtype("int64")
# ivy.set_default_float_dtype(paddle_frontend.get_default_dtype())
try:
ret = fn(*args, **kwargs)
finally:
ivy.unset_default_int_dtype()
ivy.unset_default_float_dtype()
# convert all arrays in the return to `paddle_frontend.Tensor` instances
return _from_ivy_array_to_paddle_frontend_tensor(
ret, nested=True, include_derived={"tuple": True}
)
return new_fn
def to_ivy_arrays_and_back(fn: Callable) -> Callable:
"""Wrap `fn` so it receives and returns `ivy.Array` instances.
Wrap `fn` so that input arrays are all converted to `ivy.Array` instances and
return arrays are all converted to `Tensor` instances.
"""
return outputs_to_frontend_arrays(inputs_to_ivy_arrays(fn))
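# Usage sketch (hypothetical frontend function, not from the original source):
# the wrapper lets the body work purely on ivy.Array inputs while callers pass
# and receive paddle frontend Tensors.
#
#     @to_ivy_arrays_and_back
#     def add(x, y):
#         return ivy.add(x, y)  # x and y arrive here as ivy.Array instances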
| ivy/ivy/functional/frontends/paddle/func_wrapper.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/func_wrapper.py",
"repo_id": "ivy",
"token_count": 1243
} | 35 |
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from ivy.functional.frontends.paddle.func_wrapper import (
to_ivy_arrays_and_back,
)
from ivy.utils.assertions import check_equal
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle")
def affine_grid(theta, out_shape, align_corners=True):
if len(out_shape) == 4:
N, C, H, W = out_shape
base_grid = ivy.empty((N, H, W, 3))
if align_corners:
base_grid[:, :, :, 0] = ivy.linspace(-1, 1, W)
height_values = ivy.expand_dims(ivy.linspace(-1, 1, H), axis=-1)
base_grid[:, :, :, 1] = ivy.array(
[[[height_values[i]] * W for i in range(H)]]
)[:, :, :, 0]
base_grid[:, :, :, 2] = ivy.full((H, W), 1)
grid = ivy.matmul(base_grid.view((N, H * W, 3)), theta.swapaxes(1, 2))
return grid.view((N, H, W, 2))
else:
base_grid[:, :, :, 0] = ivy.linspace(-1, 1, W) * (W - 1) / W
height_values = ivy.expand_dims(
ivy.linspace(-1, 1, H) * (H - 1) / H, axis=-1
)
base_grid[:, :, :, 1] = ivy.array(
[[[height_values[i]] * W for i in range(H)]]
)[:, :, :, 0]
base_grid[:, :, :, 2] = ivy.full((H, W), 1)
grid = ivy.matmul(base_grid.view((N, H * W, 3)), ivy.swapaxes(theta, 1, 2))
return grid.view((N, H, W, 2))
else:
N, C, D, H, W = out_shape
base_grid = ivy.empty((N, D, H, W, 4))
if align_corners:
base_grid[:, :, :, :, 0] = ivy.linspace(-1, 1, W)
height_values = ivy.linspace(-1, 1, H)
base_grid[:, :, :, :, 1] = ivy.array(
[[[[height_values[i]] * W for i in range(H)]] * D]
)
width_values = ivy.linspace(-1, 1, D)
else:
base_grid[:, :, :, :, 0] = ivy.linspace(-1, 1, W) * (W - 1) / W
height_values = ivy.linspace(-1, 1, H) * (H - 1) / H
base_grid[:, :, :, :, 1] = ivy.array(
[[[[height_values[i]] * W for i in range(H)]] * D]
)
width_values = ivy.linspace(-1, 1, D) * (D - 1) / D
base_grid[:, :, :, :, 2] = ivy.array(
[[ivy.array([[width_values[i]] * W] * H) for i in range(D)]]
)
base_grid[:, :, :, :, 3] = ivy.full((D, H, W), 1)
grid = ivy.matmul(base_grid.view((N, D * H * W, 4)), theta.swapaxes(1, 2))
return grid.view((N, D, H, W, 3))
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
def channel_shuffle(x, groups, data_format="NCHW", name=None):
if len(ivy.shape(x)) != 4:
raise ValueError(
"Input x should be 4D tensor, but received x with the shape of"
f" {ivy.shape(x)}"
)
if not isinstance(groups, int):
raise TypeError("groups must be int type")
if groups <= 0:
raise ValueError("groups must be positive")
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'.But receive"
f" Attr(data_format): {data_format} "
)
if data_format == "NCHW":
b, c, h, w = ivy.shape(x)
x = ivy.reshape(x, (b, groups, c // groups, h, w))
x = ivy.permute_dims(x, (0, 2, 1, 3, 4))
x = ivy.reshape(x, (b, c, h, w))
else:
b, h, w, c = ivy.shape(x)
x = ivy.reshape(x, (b, h, w, groups, c // groups))
x = ivy.permute_dims(x, (0, 1, 2, 4, 3))
x = ivy.reshape(x, (b, h, w, c))
return x
@to_ivy_arrays_and_back
def pixel_shuffle(x, upscale_factor, data_format="NCHW"):
input_shape = ivy.shape(x)
check_equal(
len(input_shape),
4,
message=f"pixel shuffle requires a 4D input, but got input size {input_shape}",
)
if not isinstance(upscale_factor, int):
raise TypeError("upscale factor must be int type")
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'.But receive"
f" Attr(data_format): {data_format} "
)
b = input_shape[0]
c = input_shape[1] if data_format == "NCHW" else input_shape[3]
h = input_shape[2] if data_format == "NCHW" else input_shape[1]
w = input_shape[3] if data_format == "NCHW" else input_shape[2]
upscale_factor_squared = upscale_factor**2
check_equal(
c % upscale_factor_squared,
0,
message=(
"pixel shuffle expects input channel to be divisible by square of upscale"
f" factor, but got input with sizes {input_shape}, upscale"
f" factor={upscale_factor}, and self.size(1)={c}, is not divisible by"
f" {upscale_factor_squared}"
),
as_array=False,
)
oc = int(c / upscale_factor_squared)
oh = h * upscale_factor
ow = w * upscale_factor
if data_format == "NCHW":
input_reshaped = ivy.reshape(x, (b, oc, upscale_factor, upscale_factor, h, w))
else:
input_reshaped = ivy.reshape(x, (b, h, w, upscale_factor, upscale_factor, oc))
if data_format == "NCHW":
return ivy.reshape(
ivy.permute_dims(input_reshaped, (0, 1, 4, 2, 5, 3)), (b, oc, oh, ow)
)
return ivy.reshape(
ivy.permute_dims(input_reshaped, (0, 1, 4, 2, 5, 3)), (b, oh, ow, oc)
)
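# Shape sketch (hypothetical input): with upscale_factor r, NCHW inputs map
# (N, C*r*r, H, W) -> (N, C, H*r, W*r).
#
#     x = ivy.zeros((1, 9, 4, 4))
#     pixel_shuffle(x, 3).shape  # -> (1, 1, 12, 12)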
@to_ivy_arrays_and_back
def pixel_unshuffle(x, downscale_factor, data_format="NCHW"):
if len(ivy.shape(x)) != 4:
raise ValueError(
"Input x should be 4D tensor, but received x with the shape of"
f" {ivy.shape(x)}"
)
if not isinstance(downscale_factor, int):
raise TypeError("Downscale factor must be int type")
if downscale_factor <= 0:
raise ValueError("Downscale factor must be positive")
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'.But receive"
f" Attr(data_format): {data_format} "
)
if data_format == "NCHW":
b, c, h, w = ivy.shape(x)
oc = c * downscale_factor**2
oh = h // downscale_factor
ow = w // downscale_factor
x = ivy.reshape(x, (b, c, oh, downscale_factor, ow, downscale_factor))
x = ivy.permute_dims(x, (0, 1, 3, 5, 2, 4))
x = ivy.reshape(x, (b, oc, oh, ow))
else:
b, h, w, c = ivy.shape(x)
oc = c * downscale_factor**2
oh = h // downscale_factor
ow = w // downscale_factor
x = ivy.reshape(x, (b, downscale_factor, oh, downscale_factor, ow, c))
x = ivy.permute_dims(x, (0, 1, 3, 5, 2, 4))
x = ivy.reshape(x, (b, oh, ow, oc))
return x
| ivy/ivy/functional/frontends/paddle/nn/functional/vision.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/nn/functional/vision.py",
"repo_id": "ivy",
"token_count": 3991
} | 36 |
import ivy
from ivy.func_wrapper import (
with_supported_dtypes,
with_unsupported_device_and_dtypes,
)
from ..tensor.tensor import Tensor
from ivy.functional.frontends.paddle.func_wrapper import (
to_ivy_arrays_and_back,
)
# --- Helpers --- #
# --------------- #
def _blend_images(img1, img2, ratio):
# TODO: ivy.check_float(img1) returns False for ivy array
# TODO: when lerp supports int type and when the above issue is fixed,
# replace this with ivy.check_float(img1)
    max_value = 1.0 if ivy.dtype(img1) in ("float32", "float64") else 255.0
return ivy.astype(
ivy.lerp(img2, img1, float(ratio)).clip(0, max_value), ivy.dtype(img1)
)
# helpers
def _get_image_c_axis(data_format):
if data_format.lower() == "chw":
return -3
elif data_format.lower() == "hwc":
return -1
def _get_image_num_channels(img, data_format):
return ivy.shape(img)[_get_image_c_axis(data_format)]
def _hsv_to_rgb(img):
h, s, v = img[0], img[1], img[2]
f = h * 6.0
i = ivy.floor(f)
f = f - i
i = ivy.astype(i, ivy.int32) % 6
p = ivy.clip(v * (1.0 - s), 0.0, 1.0)
q = ivy.clip(v * (1.0 - s * f), 0.0, 1.0)
t = ivy.clip(v * (1.0 - s * (1.0 - f)), 0.0, 1.0)
mask = ivy.astype(
ivy.equal(
ivy.expand_dims(i, axis=-3),
ivy.reshape(ivy.arange(6, dtype=ivy.dtype(i)), (-1, 1, 1)),
),
ivy.dtype(img),
)
matrix = ivy.stack(
[
ivy.stack([v, q, p, p, t, v], axis=-3),
ivy.stack([t, v, v, q, p, p], axis=-3),
ivy.stack([p, p, t, v, v, q], axis=-3),
],
axis=-4,
)
return ivy.einsum("...ijk, ...xijk -> ...xjk", mask, matrix)
def _rgb_to_hsv(img):
maxc = ivy.max(img, axis=-3)
minc = ivy.min(img, axis=-3)
is_equal = ivy.equal(maxc, minc)
one_divisor = ivy.ones_like(maxc)
c_delta = maxc - minc
s = c_delta / ivy.where(is_equal, one_divisor, maxc)
r, g, b = img[0], img[1], img[2]
c_delta_divisor = ivy.where(is_equal, one_divisor, c_delta)
rc = (maxc - r) / c_delta_divisor
gc = (maxc - g) / c_delta_divisor
bc = (maxc - b) / c_delta_divisor
hr = ivy.where((maxc == r), bc - gc, ivy.zeros_like(maxc))
hg = ivy.where(
((maxc == g) & (maxc != r)),
rc - bc + 2.0,
ivy.zeros_like(maxc),
)
hb = ivy.where(
((maxc != r) & (maxc != g)),
gc - rc + 4.0,
ivy.zeros_like(maxc),
)
h = (hr + hg + hb) / 6.0 + 1.0
h = h - ivy.trunc(h)
return ivy.stack([h, s, maxc], axis=-3)
# --- Main --- #
# ------------ #
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def adjust_brightness(img, brightness_factor):
assert brightness_factor >= 0, "brightness_factor should be non-negative."
assert _get_image_num_channels(img, "CHW") in [
1,
3,
], "channels of input should be either 1 or 3."
extreme_target = ivy.zeros_like(img)
return _blend_images(img, extreme_target, brightness_factor)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64", "uint8")}, "paddle")
@to_ivy_arrays_and_back
def adjust_hue(img, hue_factor):
assert -0.5 <= hue_factor <= 0.5, "hue_factor should be in range [-0.5, 0.5]"
channels = _get_image_num_channels(img, "CHW")
if channels == 1:
return img
elif channels == 3:
if ivy.dtype(img) == "uint8":
img = ivy.astype(img, "float32") / 255.0
img_hsv = _rgb_to_hsv(img)
h, s, v = img_hsv[0], img_hsv[1], img_hsv[2]
h = h + hue_factor
h = h - ivy.floor(h)
img_adjusted = _hsv_to_rgb(ivy.stack([h, s, v], axis=-3))
else:
raise ValueError("channels of input should be either 1 or 3.")
return img_adjusted
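# Usage sketch (hypothetical CHW float image): a hue_factor of 0.5 rotates the
# hue channel half-way around the colour wheel; shape and dtype are preserved.
#
#     img = ivy.random_uniform(shape=(3, 4, 4))
#     adjust_hue(img, 0.5)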
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle"
)
@to_ivy_arrays_and_back
def hflip(img):
img = ivy.array(img)
return ivy.flip(img, axis=-1)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle"
)
def normalize(img, mean, std, data_format="CHW", to_rgb=False):
if ivy.is_array(img):
if data_format == "HWC":
permuted_axes = [2, 0, 1]
else:
permuted_axes = [0, 1, 2]
        img_np = ivy.permute_dims(img, permuted_axes)
normalized_img = ivy.divide(ivy.subtract(img_np, mean), std)
return normalized_img
else:
raise ValueError("Unsupported input format")
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle"
)
@to_ivy_arrays_and_back
def pad(img, padding, fill=0, padding_mode="constant"):
dim_size = img.ndim
if not hasattr(padding, "__len__"):
if dim_size == 2:
trans_padding = ((padding, padding), (padding, padding))
elif dim_size == 3:
trans_padding = ((0, 0), (padding, padding), (padding, padding))
elif len(padding) == 2:
if dim_size == 2:
trans_padding = ((padding[1], padding[1]), (padding[0], padding[0]))
elif dim_size == 3:
trans_padding = ((0, 0), (padding[1], padding[1]), (padding[0], padding[0]))
elif len(padding) == 4:
if dim_size == 2:
trans_padding = ((padding[1], padding[3]), (padding[0], padding[2]))
elif dim_size == 3:
trans_padding = ((0, 0), (padding[1], padding[3]), (padding[0], padding[2]))
else:
raise ValueError("padding can only be 1D with size 1, 2, 4 only")
if padding_mode in ["constant", "edge", "reflect", "symmetric"]:
return ivy.pad(img, trans_padding, mode=padding_mode, constant_values=fill)
else:
raise ValueError("Unsupported padding_mode")
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle"
)
@to_ivy_arrays_and_back
def to_tensor(pic, data_format="CHW"):
array = ivy.array(pic)
return Tensor(array)
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": ("int8", "uint8", "int16", "float16", "bfloat16", "bool")
}
},
"paddle",
)
@to_ivy_arrays_and_back
def vflip(img, data_format="CHW"):
if data_format.lower() == "chw":
axis = -2
elif data_format.lower() == "hwc":
axis = -3
return ivy.flip(img, axis=axis)
| ivy/ivy/functional/frontends/paddle/vision/transforms.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/vision/transforms.py",
"repo_id": "ivy",
"token_count": 3155
} | 37 |
import ivy
FEATURE_THRESHOLD = 1e-7
class Splitter:
def __init__(
self,
criterion,
max_features,
min_samples_leaf,
min_weight_leaf,
random_state,
*args,
):
self.criterion = criterion
self.n_samples = 0
self.n_features = 0
self.max_features = max_features
self.min_samples_leaf = min_samples_leaf
self.min_weight_leaf = min_weight_leaf
self.random_state = random_state
def init(
self,
X,
y,
sample_weight,
missing_values_in_feature_mask,
*args,
):
n_samples = X.shape[0]
self.samples = ivy.empty(n_samples, dtype=ivy.int32)
samples = self.samples
j = 0
weighted_n_samples = 0.0
for i in range(n_samples):
if sample_weight is None or sample_weight[i] != 0.0:
samples[j] = i
j += 1
if sample_weight is not None:
weighted_n_samples += sample_weight[i]
else:
weighted_n_samples += 1.0
self.n_samples = j
self.weighted_n_samples = weighted_n_samples
n_features = X.shape[1]
self.features = ivy.arange(n_features, dtype=ivy.int32)
self.n_features = n_features
self.feature_values = ivy.empty(n_samples, dtype=ivy.float32)
self.constant_features = ivy.empty(n_features, dtype=ivy.int32)
self.y = y
self.sample_weight = sample_weight
if missing_values_in_feature_mask is not None:
self.criterion.init_sum_missing()
return 0
def node_reset(self, start, end, weighted_n_node_samples):
self.start = start
self.end = end
self.criterion.init(
self.y,
self.sample_weight,
self.weighted_n_samples,
self.samples,
start,
end,
)
weighted_n_node_samples = self.criterion.weighted_n_node_samples
return 0, weighted_n_node_samples
def node_split(self, impurity, split, n_constant_features):
pass
def node_value(self, dest, node_id):
return self.criterion.node_value(dest, node_id)
def node_impurity(self):
return self.criterion.node_impurity()
class DensePartitioner:
X = []
samples = []
feature_values = []
start = 0
end = 0
n_missing = 0
missing_values_in_feature_mask = []
def __init__(
self,
X,
samples,
feature_values,
missing_values_in_feature_mask,
):
self.X = X
self.samples = samples
self.feature_values = feature_values
self.missing_values_in_feature_mask = missing_values_in_feature_mask
def init_node_split(self, start, end):
self.start = start
self.end = end
self.n_missing = 0
def sort_samples_and_feature_values(self, current_feature):
feature_values = self.feature_values
X = self.X
samples = self.samples
n_missing = 0
missing_values_in_feature_mask = self.missing_values_in_feature_mask
if (
missing_values_in_feature_mask is not None
and missing_values_in_feature_mask[current_feature]
):
i, current_end = self.start, self.end - 1
while i <= current_end:
if ivy.isnan(X[samples[current_end], current_feature]):
n_missing += 1
current_end -= 1
continue
if ivy.isnan(X[samples[i], current_feature]):
samples[i], samples[current_end] = samples[current_end], samples[i]
n_missing += 1
current_end -= 1
feature_values[i] = X[samples[i], current_feature]
i += 1
else:
for i in range(self.start, self.end):
feature_values[i] = X[int(samples[i]), int(current_feature)]
(
self.feature_values[self.start : self.end],
self.samples[self.start : self.end],
) = sort(
feature_values[self.start : self.end],
samples[self.start : self.end],
self.end - self.start - n_missing,
)
self.n_missing = n_missing
def find_min_max(
self,
current_feature: int,
min_feature_value_out: float,
max_feature_value_out: float,
):
X = self.X
samples = self.samples
min_feature_value = X[samples[self.start], current_feature]
max_feature_value = min_feature_value
feature_values = self.feature_values
feature_values[self.start] = min_feature_value
for p in range(self.start + 1, self.end):
current_feature_value = X[samples[p], current_feature]
feature_values[p] = current_feature_value
if current_feature_value < min_feature_value:
min_feature_value = current_feature_value
elif current_feature_value > max_feature_value:
max_feature_value = current_feature_value
return min_feature_value, max_feature_value
def next_p(self, p_prev: int, p: int):
feature_values = self.feature_values
end_non_missing = self.end - self.n_missing
while (
p + 1 < end_non_missing
and feature_values[p + 1] <= feature_values[p] + FEATURE_THRESHOLD
):
p += 1
p_prev = p
p += 1
return p_prev, p
    def partition_samples(self, current_threshold: float):
p = self.start
partition_end = self.end
samples = self.samples
feature_values = self.feature_values
while p < partition_end:
            if feature_values[p] <= current_threshold:
p += 1
else:
partition_end -= 1
feature_values[p], feature_values[partition_end] = (
feature_values[partition_end],
feature_values[p],
)
samples[p], samples[partition_end] = (
samples[partition_end],
samples[p],
)
return partition_end
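    # Sketch of the partition (hypothetical values): with threshold 2.0 the
    # feature values [1, 3, 2, 5] are reordered in place to [1, 2, 5, 3] and
    # the method returns 2, the index of the first value above the threshold.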
def partition_samples_final(
self,
best_pos,
best_threshold,
best_feature,
best_n_missing,
):
start = self.start
p = start
end = self.end - 1
partition_end = end - best_n_missing
samples = self.samples
X = self.X
if best_n_missing != 0:
while p < partition_end:
if ivy.isnan(X[samples[end], best_feature]):
end -= 1
continue
current_value = X[samples[p], best_feature]
if ivy.isnan(current_value):
samples[p], samples[end] = samples[end], samples[p]
end -= 1
current_value = X[samples[p], best_feature]
if current_value <= best_threshold:
p += 1
else:
samples[p], samples[partition_end] = (
samples[partition_end],
samples[p],
)
partition_end -= 1
else:
while p < partition_end:
if X[samples[p], best_feature] <= best_threshold:
p += 1
else:
samples[p], samples[partition_end] = (
samples[partition_end],
samples[p],
)
partition_end -= 1
self.samples = samples
class SplitRecord:
def __init__(
self,
feature=0,
pos=0,
threshold=0.0,
improvement=-ivy.inf,
impurity_left=0.0,
impurity_right=0.0,
missing_go_to_left=False,
n_missing=0,
):
self.feature = feature
self.pos = pos
self.threshold = threshold
self.improvement = improvement
self.impurity_left = impurity_left
self.impurity_right = impurity_right
self.missing_go_to_left = missing_go_to_left
self.n_missing = n_missing
class BestSplitter(Splitter):
def init(
self,
X,
y,
sample_weight,
missing_values_in_feature_mask,
*args,
):
Splitter.init(self, X, y, sample_weight, missing_values_in_feature_mask, *args)
self.partitioner = DensePartitioner(
X, self.samples, self.feature_values, missing_values_in_feature_mask
)
def node_split(self, impurity, split, n_constant_features):
return node_split_best(
self,
self.partitioner,
self.criterion,
impurity,
split,
n_constant_features,
)
# --- Helpers --- #
# --------------- #
def _init_split(split_record, start_pos):
split_record.impurity_left = ivy.inf
split_record.impurity_right = ivy.inf
split_record.pos = start_pos
split_record.feature = 0
split_record.threshold = 0.0
split_record.improvement = -ivy.inf
split_record.missing_go_to_left = False
split_record.n_missing = 0
return split_record
# --- Main --- #
# ------------ #
def node_split_best(
splitter, partitioner, criterion, impurity, split, n_constant_features
):
start = splitter.start
end = splitter.end
features = splitter.features
constant_features = splitter.constant_features
n_features = splitter.n_features
feature_values = splitter.feature_values
max_features = splitter.max_features
min_samples_leaf = splitter.min_samples_leaf
min_weight_leaf = splitter.min_weight_leaf
best_split = SplitRecord()
current_split = SplitRecord()
best_proxy_improvement = -ivy.inf
f_i = n_features
p_prev = 0
n_visited_features = 0
# Number of features discovered to be constant during the split search
n_found_constants = 0
# Number of features known to be constant and drawn without replacement
n_drawn_constants = 0
n_known_constants = n_constant_features
# n_total_constants = n_known_constants + n_found_constants
n_total_constants = n_known_constants
best_split = _init_split(best_split, end)
partitioner.init_node_split(start, end)
while f_i > n_total_constants and (
n_visited_features < max_features
or n_visited_features <= n_found_constants + n_drawn_constants
):
n_visited_features += 1
f_j = ivy.randint(n_drawn_constants, f_i - n_found_constants)
if f_j < n_known_constants:
features[n_drawn_constants], features[f_j] = (
features[f_j],
features[n_drawn_constants],
)
n_drawn_constants += 1
continue
# f_j in the interval [n_known_constants, f_i - n_found_constants[
f_j += n_found_constants
# f_j in the interval [n_total_constants, f_i[
current_split.feature = features[f_j]
partitioner.sort_samples_and_feature_values(current_split.feature)
n_missing = partitioner.n_missing
end_non_missing = end - n_missing
if (
end_non_missing == start
or feature_values[end_non_missing - 1]
<= feature_values[start] + FEATURE_THRESHOLD
):
features[f_j], features[n_total_constants] = (
features[n_total_constants],
features[f_j],
)
n_found_constants += 1
n_total_constants += 1
continue
f_i -= 1
features[f_i], features[f_j] = features[f_j], features[f_i]
has_missing = n_missing != 0
criterion.init_missing(n_missing)
n_searches = 2 if has_missing else 1
for i in range(n_searches):
missing_go_to_left = i == 1
criterion.missing_go_to_left = missing_go_to_left
criterion.reset()
p = start
while p < end_non_missing:
p_prev, p = partitioner.next_p(p_prev, p)
if p >= end_non_missing:
continue
if missing_go_to_left:
n_left = p - start + n_missing
n_right = end_non_missing - p
else:
n_left = p - start
n_right = end_non_missing - p + n_missing
if n_left < min_samples_leaf or n_right < min_samples_leaf:
continue
current_split.pos = p
criterion.update(current_split.pos)
if (
criterion.weighted_n_left < min_weight_leaf
or criterion.weighted_n_right < min_weight_leaf
):
continue
current_proxy_improvement = criterion.proxy_impurity_improvement()
if current_proxy_improvement > best_proxy_improvement:
best_proxy_improvement = current_proxy_improvement
current_split.threshold = (
feature_values[p_prev] / 2.0 + feature_values[p] / 2.0
)
if current_split.threshold in (
feature_values[p],
ivy.inf,
-ivy.inf,
):
current_split.threshold = feature_values[p_prev]
current_split.n_missing = n_missing
if n_missing == 0:
current_split.missing_go_to_left = n_left > n_right
else:
current_split.missing_go_to_left = missing_go_to_left
best_split = SplitRecord(**current_split.__dict__)
if has_missing:
n_left, n_right = end - start - n_missing, n_missing
p = end - n_missing
missing_go_to_left = 0
if not ((n_left < min_samples_leaf) or (n_right < min_samples_leaf)):
criterion.missing_go_to_left = missing_go_to_left
criterion.update(p)
if not (
criterion.weighted_n_left < min_weight_leaf
or criterion.weighted_n_right < min_weight_leaf
):
current_proxy_improvement = criterion.proxy_impurity_improvement()
if current_proxy_improvement > best_proxy_improvement:
best_proxy_improvement = current_proxy_improvement
current_split.threshold = ivy.inf
current_split.missing_go_to_left = missing_go_to_left
current_split.n_missing = n_missing
current_split.pos = p
best_split = current_split
# Reorganize into samples[start:best_split.pos] + samples[best_split.pos:end]
if best_split.pos < end:
partitioner.partition_samples_final(
best_split.pos,
best_split.threshold,
best_split.feature,
best_split.n_missing,
)
if best_split.n_missing != 0:
criterion.init_missing(best_split.n_missing)
criterion.missing_go_to_left = best_split.missing_go_to_left
criterion.reset()
criterion.update(best_split.pos)
(
best_split.impurity_left,
best_split.impurity_right,
) = criterion.children_impurity(
best_split.impurity_left, best_split.impurity_right
)
best_split.improvement = criterion.impurity_improvement(
impurity, best_split.impurity_left, best_split.impurity_right
)
# best_split, samples = shift_missing_values_to_left_if_required(
# best_split, samples, end)
# todo : implement shift_missing_values_to_left_if_required
features[0:n_known_constants] = constant_features[0:n_known_constants]
constant_features[n_known_constants:n_found_constants] = features[
n_known_constants:n_found_constants
]
split = best_split
n_constant_features = n_total_constants
return 0, n_constant_features, split
def sort(feature_values, samples, n):
    if n == 0:
        return feature_values, samples
    idx = ivy.argsort(feature_values)
    return feature_values[idx], samples[idx]
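# Illustrative usage of `sort` (a sketch; `samples` holds sample indices that
# must stay aligned with their feature values):
#   fv = ivy.array([0.3, 0.1, 0.2])
#   s = ivy.array([10, 11, 12])
#   sort(fv, s, 3)  ->  (ivy.array([0.1, 0.2, 0.3]), ivy.array([11, 12, 10]))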
| ivy/ivy/functional/frontends/sklearn/tree/_splitter.py/0 | {
"file_path": "ivy/ivy/functional/frontends/sklearn/tree/_splitter.py",
"repo_id": "ivy",
"token_count": 8436
} | 38 |
from . import activations
from . import backend
from . import layers
from . import metrics
from . import regularizers
| ivy/ivy/functional/frontends/tensorflow/keras/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/tensorflow/keras/__init__.py",
"repo_id": "ivy",
"token_count": 27
} | 39 |
import ivy
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
from ivy.functional.frontends.torch import promote_types_of_torch_inputs
import ivy.functional.frontends.torch as torch_frontend
erfinv = torch_frontend.special.erfinv
@to_ivy_arrays_and_back
def atleast_1d(*tensors):
return ivy.atleast_1d(*tensors)
@to_ivy_arrays_and_back
def atleast_2d(*tensors):
return ivy.atleast_2d(*tensors)
@to_ivy_arrays_and_back
def atleast_3d(*tensors):
return ivy.atleast_3d(*tensors)
# TODO: Add Ivy function for block_diag but only scipy.linalg and \
# and torch supports block_diag currently
@to_ivy_arrays_and_back
def block_diag(*tensors):
shapes_list = [ivy.shape(t) for t in tensors]
# TODO: Add ivy function to return promoted dtype for multiple tensors at once
promoted_dtype = ivy.as_ivy_dtype(tensors[0].dtype)
for idx in range(1, len(tensors)):
promoted_dtype = torch_frontend.promote_types_torch(
            promoted_dtype, tensors[idx].dtype
)
inp_tensors = [ivy.asarray(t, dtype=promoted_dtype) for t in tensors]
tensors_2d = []
result_dim_0, result_dim_1 = 0, 0
for idx, t_shape in enumerate(shapes_list):
dim_0, dim_1 = 1, 1
if len(t_shape) > 2:
raise ivy.exceptions.IvyError(
"Input tensors must have 2 or fewer dimensions."
f"Input {idx} has {len(t_shape)} dimensions"
)
elif len(t_shape) == 2:
dim_0, dim_1 = t_shape
tensors_2d.append(inp_tensors[idx])
elif len(t_shape) == 1:
dim_1 = t_shape[0]
tensors_2d.append(ivy.reshape(inp_tensors[idx], shape=(dim_0, dim_1)))
else:
tensors_2d.append(ivy.reshape(inp_tensors[idx], shape=(dim_0, dim_1)))
result_dim_0 += dim_0
result_dim_1 += dim_1
shapes_list[idx] = (dim_0, dim_1)
ret = ivy.zeros((result_dim_0, result_dim_1), dtype=promoted_dtype)
ret_dim_0 = 0
ret_dim_1 = 0
for idx, t_shape in enumerate(shapes_list):
dim_0, dim_1 = t_shape
ret[
ret_dim_0 : ret_dim_0 + dim_0, ret_dim_1 : ret_dim_1 + dim_1
] = ivy.copy_array(tensors_2d[idx])
ret_dim_0 += dim_0
ret_dim_1 += dim_1
return ret
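# Illustrative usage (a sketch of the expected output, not executed here):
#   a = ivy.array([[1, 2], [3, 4]])
#   b = ivy.array([5, 6])          # 1-D inputs are treated as shape (1, n)
#   block_diag(a, b)
#   -> [[1, 2, 0, 0],
#       [3, 4, 0, 0],
#       [0, 0, 5, 6]]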
@to_ivy_arrays_and_back
def broadcast_shapes(*shapes):
return ivy.broadcast_shapes(*shapes)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
@to_ivy_arrays_and_back
def broadcast_to(tensor, shape):
return ivy.broadcast_to(tensor, shape)
@to_ivy_arrays_and_back
def cartesian_prod(*tensors):
if len(tensors) == 1:
return tensors
ret = ivy.meshgrid(*tensors, indexing="ij")
ret = ivy.stack(ret, axis=-1)
ret = ivy.reshape(ret, shape=(-1, len(tensors)))
return ret
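# Illustrative usage (a sketch, mirroring torch.cartesian_prod):
#   cartesian_prod(ivy.array([1, 2]), ivy.array([3, 4]))
#   -> [[1, 3], [1, 4], [2, 3], [2, 4]]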
@with_unsupported_dtypes({"2.2 and below": "float16"}, "torch")
@to_ivy_arrays_and_back
def cdist(x1, x2, p=2.0, compute_mode="use_mm_for_euclid_dist_if_necessary"):
if len(x1.shape) == 2 and len(x2.shape) == 2:
x1_first_dim, x2_first_dim = x1.shape[0], x2.shape[0]
if (
compute_mode == "use_mm_for_euclid_dist_if_necessary"
and (x1_first_dim > 25 or x2_first_dim > 25)
or compute_mode == "use_mm_for_euclid_dist"
):
return ivy.vector_norm(x1[:, None, :] - x2[None, :, :], axis=-1, ord=p)
else:
distances = ivy.zeros((x1_first_dim, x2_first_dim), dtype=x1.dtype)
for i in range(x1_first_dim):
for j in range(x2_first_dim):
distances[i, j] = ivy.vector_norm(x1[i, :] - x2[j, :], ord=p)
return distances
if p == 2:
B, P, M = x1.shape
_, R, _ = x2.shape
if (
compute_mode == "use_mm_for_euclid_dist_if_necessary"
and (P > 25 or R > 25)
or compute_mode == "use_mm_for_euclid_dist"
):
return ivy.vector_norm(
x1[:, :, None, :] - x2[:, None, :, :], axis=-1, ord=p
)
else:
distances = ivy.zeros((B, P, R), dtype=x1.dtype)
for b in range(B):
for i in range(P):
for j in range(R):
distances[b, i, j] = ivy.vector_norm(
x1[b, i, :] - x2[b, j, :], ord=p
)
return distances
else:
return ivy.vector_norm(x1[:, :, None, :] - x2[:, None, :, :], axis=-1, ord=p)
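# Illustrative usage (a sketch; inputs this small take the explicit-loop branch):
#   x1 = ivy.array([[0.0, 0.0], [1.0, 0.0]])
#   x2 = ivy.array([[0.0, 1.0]])
#   cdist(x1, x2)  ->  [[1.0], [1.41421356]]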
@to_ivy_arrays_and_back
def clone(input, *, memory_format=None):
return ivy.copy_array(input)
@with_unsupported_dtypes({"2.2 and below": ("float16", "bool")}, "torch")
@to_ivy_arrays_and_back
def corrcoef(input):
if len(ivy.shape(input)) > 2:
raise ivy.exceptions.IvyError(
"corrcoef(): expected input to have two or fewer dimensions but got an"
f" input with {ivy.shape(input)} dimensions"
)
return ivy.corrcoef(input, y=None, rowvar=True)
@to_ivy_arrays_and_back
def cov(input, /, *, correction=1, fweights=None, aweights=None):
return ivy.cov(input, ddof=correction, fweights=fweights, aweights=aweights)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def cross(input, other, dim=None, *, out=None):
if dim is None:
dim = -1
input, other = promote_types_of_torch_inputs(input, other)
return ivy.cross(input, other, axisa=-1, axisb=-1, axisc=-1, axis=dim, out=out)
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{
"2.2 and below": (
"uint16",
"uint32",
"uint64",
"bfloat16",
"float16",
"complex64",
"complex128",
)
},
"torch",
)
def cummax(input, dim, *, out=None):
input_dtype = input.dtype
result_values, result_indices = ivy.cummax(input, axis=dim, out=out)
result_values = result_values.astype(input_dtype)
return result_values, result_indices
@to_ivy_arrays_and_back
def cumprod(input, dim, *, dtype=None, out=None):
if not dtype and "int" in input.dtype:
dtype = ivy.int64
return ivy.cumprod(input, axis=dim, dtype=dtype, out=out)
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{"2.2 and below": ("uint8", "bfloat16", "float16"), "1.12.1": ()},
"torch",
)
def cumsum(input, dim, *, dtype=None, out=None):
if not dtype and "int" in input.dtype:
dtype = ivy.int64
return ivy.cumsum(input, axis=dim, dtype=dtype, out=out)
@to_ivy_arrays_and_back
def diag(input, diagonal=0, *, out=None):
return ivy.diag(input, k=diagonal)
@to_ivy_arrays_and_back
def diag_embed(
input,
offset=0,
dim1=-2,
dim2=-1,
):
def _handle_dim(rank, idx):
if idx >= 0 and idx < rank:
return idx
if idx < 0:
idx = idx + rank
if idx < 0 or idx >= rank:
raise IndexError
return idx
input_type = ivy.dtype(input)
rank = input.ndim + 1
dim1 = _handle_dim(rank, dim1)
dim2 = _handle_dim(rank, dim2)
if dim1 > dim2:
dim1, dim2 = dim2, dim1
offset = -offset
last_dim = list(input.shape)[-1]
if offset != 0:
# add padding to match the new size
t_shape = list(input.shape)
t_shape[-1] = abs(offset)
z = ivy.zeros(t_shape, dtype=input.dtype, device=input.device)
pair = (z, input) if offset > 0 else (input, z)
input = ivy.concat(pair, axis=-1)
last_dim += abs(offset)
input = input.expand_dims(axis=dim1).moveaxis(-1, dim2)
# generate ranges shifting indices based on offset
a_range = ivy.arange(last_dim, device=input.device, dtype=ivy.int64)
b_range = ivy.arange(
offset, last_dim + offset, device=input.device, dtype=ivy.int64
)
# broadcast
cond = a_range == b_range.expand_dims(axis=-1)
cond_shape = [last_dim if i in (dim1, dim2) else 1 for i in range(len(input.shape))]
cond = cond.reshape(cond_shape)
if input.dtype == ivy.bool:
ret = cond.logical_and(input)
else:
ret = ivy.where(cond, input, 0)
return ret.astype(input_type)
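# Illustrative usage (a sketch, mirroring torch.diag_embed):
#   diag_embed(ivy.array([1.0, 2.0]))
#   -> [[1.0, 0.0],
#       [0.0, 2.0]]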
@with_supported_dtypes(
{"2.2 and below": ("float32", "float64", "int32", "int64")}, "torch"
)
@to_ivy_arrays_and_back
def diagflat(x, offset=0, name=None):
arr = ivy.diagflat(x, offset=offset)
return arr
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
@to_ivy_arrays_and_back
def diagonal(input, offset=0, dim1=0, dim2=1):
return ivy.diagonal(input, offset=offset, axis1=dim1, axis2=dim2)
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{"2.2 and below": ("int8", "float16", "bfloat16", "bool")}, "torch"
)
def diff(input, n=1, dim=-1, prepend=None, append=None):
return ivy.diff(input, n=n, axis=dim, prepend=prepend, append=append, out=None)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def einsum(equation, *operands):
if len(operands) == 1 and isinstance(operands[0], (list, tuple)):
operands = operands[0]
return ivy.einsum(equation, *operands)
@to_ivy_arrays_and_back
def finfo(dtype):
return ivy.finfo(dtype)
@to_ivy_arrays_and_back
def flatten(input, start_dim=0, end_dim=-1):
return ivy.flatten(input, start_dim=start_dim, end_dim=end_dim)
@to_ivy_arrays_and_back
def flip(input, dims):
return ivy.flip(input, axis=dims, copy=True)
@to_ivy_arrays_and_back
def fliplr(input):
ivy.utils.assertions.check_greater(
len(input.shape),
2,
allow_equal=True,
message="requires tensor to be at least 2D",
as_array=False,
)
return ivy.fliplr(input, copy=True)
@to_ivy_arrays_and_back
def flipud(input):
ivy.utils.assertions.check_greater(
len(input.shape),
1,
allow_equal=True,
message="requires tensor to be at least 1D",
as_array=False,
)
return ivy.flipud(input, copy=True)
@to_ivy_arrays_and_back
def gcd(input, other, *, out=None):
return ivy.gcd(input, other, out=out)
@to_ivy_arrays_and_back
def kron(input, other, *, out=None):
return ivy.kron(input, other, out=out)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("int8",)}, "torch")
def lcm(input, other, *, out=None):
return ivy.lcm(input, other, out=out)
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
"integer",
)
},
"torch",
)
@to_ivy_arrays_and_back
def logcumsumexp(input, dim, *, out=None):
if len(input.shape) == 0:
ret = input
else:
        # Casting to float64 widens the representable range before
        # exponentiating; we cast back to the original dtype at the end.
original_dtype = input.dtype
exp_input = ivy.exp(input.astype("float64"))
summed_exp_input = ivy.cumsum(exp_input, axis=dim)
ret = ivy.log(summed_exp_input).astype(original_dtype)
if ivy.exists(out):
ivy.inplace_update(out, ret)
return ret
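# Note: a more numerically stable variant would subtract the maximum along the
# scan axis before exponentiating (the usual log-sum-exp trick), e.g. (a sketch):
#   m = ivy.max(input, axis=dim, keepdims=True)
#   ret = m + ivy.log(ivy.cumsum(ivy.exp(input - m), axis=dim))
# The float64 cast above only widens the overflow threshold.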
@to_ivy_arrays_and_back
def lu_solve(b, LU_data, LU_pivots, *, out=None):
return torch_frontend.linalg.lu_solve(LU_data, LU_pivots, b, out=out)
@to_ivy_arrays_and_back
def meshgrid(*tensors, indexing=None):
if indexing is None:
indexing = "ij"
if len(tensors) == 1 and isinstance(tensors[0], (list, tuple)):
tensors = tensors[0]
return tuple(ivy.meshgrid(*tensors, indexing=indexing))
@to_ivy_arrays_and_back
def ravel(input):
return ivy.reshape(input, (-1,))
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@to_ivy_arrays_and_back
def renorm(input, p, dim, maxnorm, *, out=None):
# Torch hardcodes this magic number
epsilon = 1e-07
# To iterate through the n-th dimension of `input`, it is easiest to swap
# the dimension that we wish to iterate through to be first, then iterate
# through the re-ordered data. This re-ordering is fine for our purposes
# as we calculate the p-norms and they are all order agnostic. That is,
# we may re-order the elements of any vector, and as long as none are
# added, edited, or removed, the p-norm will be the same.
input_swapped = ivy.swapaxes(input, 0, dim)
individual_tensors = [input_swapped[i, ...] for i in range(input_swapped.shape[0])]
ret = []
for individual_tensor in individual_tensors:
# These tensors may be multidimensional, but must be treated as a single vector.
original_shape = individual_tensor.shape
tensor_flattened = ivy.flatten(individual_tensor)
# Don't scale up to the maximum norm, only scale down to it.
norm = ivy.vector_norm(tensor_flattened, axis=0, ord=p)
multiplier = ivy.minimum(maxnorm / (norm + epsilon), ivy.ones_like(norm))
# Store the result in its original shape
ret.append(
ivy.reshape(ivy.multiply(tensor_flattened, multiplier), original_shape)
)
# We must undo our axis swap from the start.
ret = ivy.asarray(ret, dtype=ret[0].dtype)
ret = ivy.swapaxes(ret, 0, dim)
ret = ivy.reshape(ret, input.shape)
if ivy.exists(out):
ivy.inplace_update(out, ret)
return ret
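# Illustrative usage (a sketch): cap each row's L2 norm at 1
#   x = ivy.array([[3.0, 4.0], [0.3, 0.4]])
#   renorm(x, 2, 0, 1.0)
#   -> [[~0.6, ~0.8],   (norm 5 rescaled down to ~1)
#       [0.3, 0.4]]     (norm 0.5 already within the bound, left unchanged)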
@with_supported_dtypes(
{
"2.2 and below": (
"int32",
"int64",
)
},
"torch",
)
@to_ivy_arrays_and_back
def repeat_interleave(input, repeats, dim=None, *, output_size=None):
return ivy.repeat(input, repeats, axis=dim)
@to_ivy_arrays_and_back
def roll(input, shifts, dims=None):
return ivy.roll(input, shifts, axis=dims)
@to_ivy_arrays_and_back
def rot90(input, k, dims):
total_dims = ivy.get_num_dims(input)
total_rot_dims = len(dims)
ivy.utils.assertions.check_greater(
total_dims,
2,
allow_equal=True,
message="expected total dims >= 2, but got total dims = " + str(total_dims),
as_array=False,
)
ivy.utils.assertions.check_equal(
total_rot_dims,
2,
message="expected total rotation dims == 2, but got dims = "
+ str(total_rot_dims),
as_array=False,
)
ivy.utils.assertions.check_equal(
dims[0],
dims[1],
inverse=True,
message="expected rotation dims to be different, but got dim0 = "
+ str(dims[0])
+ " and dim1 = "
+ str(dims[1]),
as_array=False,
)
ivy.utils.assertions.check_equal(
ivy.abs(dims[0] - dims[1]),
total_dims,
inverse=True,
message="expected rotation dims to be different, but got dim0 = "
+ str(dims[0])
+ " and dim1 = "
+ str(dims[1]),
)
# range of dims
ivy.utils.assertions.check_less(
dims[0],
total_dims,
message="Rotation dim0 out of range, dim0 = " + str(dims[0]),
as_array=False,
)
ivy.utils.assertions.check_greater(
dims[0],
-total_dims,
allow_equal=True,
message="Rotation dim0 out of range, dim0 = " + str(dims[0]),
as_array=False,
)
ivy.utils.assertions.check_less(
dims[1],
total_dims,
message="Rotation dim1 out of range, dim1 = " + str(dims[1]),
as_array=False,
)
ivy.utils.assertions.check_greater(
dims[1],
-total_dims,
allow_equal=True,
message="Rotation dim1 out of range, dim1 = " + str(dims[1]),
as_array=False,
)
k = (4 + (k % 4)) % 4
new_axes = list(range(total_dims))
new_axes[min(dims)], new_axes[max(dims)] = max(dims), min(dims)
if k == 1:
flipped = ivy.flip(input, axis=dims[1])
return ivy.permute_dims(flipped, axes=new_axes, copy=True)
elif k == 2:
return ivy.flip(input, axis=dims, copy=True)
elif k == 3:
flipped = ivy.flip(input, axis=dims[0])
return ivy.permute_dims(flipped, axes=new_axes, copy=True)
else:
return input
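# Illustrative usage (a sketch, mirroring torch.rot90): one counter-clockwise
# quarter turn in the (0, 1) plane
#   rot90(ivy.array([[1, 2], [3, 4]]), 1, (0, 1))
#   -> [[2, 4],
#       [1, 3]]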
@to_ivy_arrays_and_back
def searchsorted(
sorted_sequence,
values,
/,
*,
out_int32=False,
right=False,
side=None,
out=None,
sorter=None,
):
if side == "left":
if right:
raise ivy.exceptions.IvyError(
"side and right can't be set to opposites, got side of left"
" while right was True"
)
elif side is None:
side = "right" if right else "left"
ret = ivy.searchsorted(sorted_sequence, values, side=side, out=out, sorter=sorter)
if out_int32:
ret = ivy.astype(ret, "int32")
return ret
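# Illustrative usage (a sketch, mirroring torch.searchsorted):
#   searchsorted(ivy.array([1, 3, 5, 7]), ivy.array([4]))              -> [2]
#   searchsorted(ivy.array([1, 3, 5, 7]), ivy.array([5]), right=True)  -> [3]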
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
@to_ivy_arrays_and_back
def tensordot(a, b, dims=2, out=None):
a, b = promote_types_of_torch_inputs(a, b)
return ivy.tensordot(a, b, axes=dims, out=out)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def trace(input):
    if "int" in input.dtype:
        input = input.astype("int64")
    return ivy.astype(ivy.trace(input), input.dtype)
@with_supported_dtypes({"2.5.0 and below": ("int8", "int16", "bfloat16")}, "paddle")
@to_ivy_arrays_and_back
def tril(input, diagonal=0, *, out=None):
return ivy.tril(input, k=diagonal, out=out)
@with_unsupported_dtypes({"2.2 and below": ("int8", "uint8", "int16")}, "torch")
@to_ivy_arrays_and_back
def tril_indices(row, col, offset=0, *, dtype=ivy.int64, device="cpu", layout=None):
sample_matrix = ivy.tril(ivy.ones((row, col), device=device), k=offset)
return ivy.stack(ivy.nonzero(sample_matrix)).astype(dtype)
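# Illustrative usage (a sketch): row and column indices of the lower triangle
#   tril_indices(3, 3)
#   -> [[0, 1, 1, 2, 2, 2],
#       [0, 0, 1, 0, 1, 2]]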
@with_supported_dtypes(
    {"2.2 and below": ("float64", "float32", "int32", "int64")}, "torch"
)
@to_ivy_arrays_and_back
def triu(input, diagonal=0, *, out=None):
return ivy.triu(input, k=diagonal, out=out)
@to_ivy_arrays_and_back
def triu_indices(row, col, offset=0, dtype="int64", device="cpu", layout=None):
# TODO: Handle layout flag when possible.
sample_matrix = ivy.triu(ivy.ones((row, col), device=device), k=offset)
return ivy.stack(ivy.nonzero(sample_matrix)).astype(dtype)
@to_ivy_arrays_and_back
def unflatten(input, dim, sizes):
return ivy.unflatten(input, dim=dim, shape=sizes, out=None)
@to_ivy_arrays_and_back
def vander(x, N=None, increasing=False):
    return ivy.vander(x, N=N, increasing=increasing, out=None)
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.2 and below": ("float32", "float64")}, "torch")
def view_as_complex(input):
if ivy.shape(input)[-1] != 2:
raise ivy.exceptions.IvyError("The last dimension must have a size of 2")
real, imaginary = ivy.split(
ivy.stop_gradient(input, preserve_type=False),
num_or_size_splits=2,
axis=ivy.get_num_dims(input) - 1,
)
dtype = ivy.complex64 if input.dtype == ivy.float32 else ivy.complex128
real = ivy.squeeze(real, axis=ivy.get_num_dims(real) - 1).astype(dtype)
imag = ivy.squeeze(imaginary, axis=ivy.get_num_dims(imaginary) - 1).astype(dtype)
complex_ = real + imag * 1j
return ivy.array(complex_, dtype=dtype)
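# Illustrative usage (a sketch): pairs along the last axis become complex
#   view_as_complex(ivy.array([[1.0, 2.0], [3.0, 4.0]]))
#   -> ivy.array([1.+2.j, 3.+4.j])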
@with_supported_dtypes(
{"2.2 and below": ("complex64", "complex128")},
"torch",
)
@to_ivy_arrays_and_back
def view_as_real(input):
if not ivy.is_complex_dtype(input):
raise ivy.exceptions.IvyError(
"view_as_real is only supported for complex tensors"
)
re_part = ivy.real(input)
im_part = ivy.imag(input)
return ivy.stack((re_part, im_part), axis=-1)
| ivy/ivy/functional/frontends/torch/miscellaneous_ops.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/miscellaneous_ops.py",
"repo_id": "ivy",
"token_count": 9421
} | 40 |
import ivy
from ivy.functional.frontends.torch.tensor import Tensor
import ivy.functional.frontends.torch as torch_frontend
from ivy.functional.ivy.gradients import _variable, _is_variable, _variable_data
class Parameter(Tensor):
def __init__(self, data=None, device=None, requires_grad=True):
if data is None:
data = torch_frontend.empty(0)
ivy_array = (
ivy.array(data) if not hasattr(data, "_ivy_array") else data.ivy_array
)
ivy_array = _variable(ivy_array) if not _is_variable(data) else ivy_array
self._ivy_array = ivy.to_device(ivy_array, device) if device else ivy_array
self._data = Tensor(_variable_data(self._ivy_array), _init_overload=True)
self._requires_grad = requires_grad
self._is_leaf = True
self._grads = None
self.grad_fn = None
def __deepcopy__(self, memo):
if id(self) in memo:
return memo[id(self)]
else:
            result = type(self)(self.data.clone(), requires_grad=self.requires_grad)
memo[id(self)] = result
return result
def __repr__(self):
return "Parameter containing:\n" + super().__repr__()
| ivy/ivy/functional/frontends/torch/nn/parameter.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/nn/parameter.py",
"repo_id": "ivy",
"token_count": 520
} | 41 |
from . import coordinate_common
from .coordinate_common import *
from . import updater_coordinate
from .updater_coordinate import *
| ivy/ivy/functional/frontends/xgboost/linear/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/xgboost/linear/__init__.py",
"repo_id": "ivy",
"token_count": 36
} | 42 |
# global
from typing import Union, Optional, Callable, Literal
# local
import ivy
from ivy.utils.backend import current_backend
from ivy.utils.exceptions import handle_exceptions
from ivy.func_wrapper import (
handle_array_function,
handle_nestable,
to_native_arrays_and_back,
handle_array_like_without_promotion,
handle_out_argument,
inputs_to_ivy_arrays,
handle_device,
handle_backend_invalid,
handle_complex_input,
)
def _logit_jax_like(
x: Union[float, int, ivy.Array],
/,
*,
fn_original: Optional[Callable] = None,
eps: Optional[float] = None,
out: Optional[ivy.Array] = None,
):
real = ivy.real(x)
imag = ivy.imag(x)
if eps is None:
real = ivy.where(ivy.logical_or(real > 1, real < 0), ivy.nan, real)
else:
real = ivy.clip(real, eps, 1 - eps)
z = ivy.add(real, ivy.multiply(ivy.array(1j, dtype=x.dtype), imag))
z = ivy.log(z / (1 - z))
return z
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
@handle_complex_input
def logit(
x: Union[float, int, ivy.Array],
/,
*,
eps: Optional[float] = None,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the logit of x.
logit(x) = log(x / (1 - x)).
Parameters
----------
x
Input data.
eps
        When eps is None the function outputs NaN where x < 0 or x > 1,
        and inf or -inf where x = 1 or x = 0, respectively.
        Otherwise if eps is defined, x is clamped to [eps, 1 - eps].
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
Optional output array.
Returns
-------
ret
Array containing elementwise logits of x.
Examples
--------
>>> x = ivy.array([1, 0, 0.9])
>>> z = ivy.logit(x)
>>> print(z)
ivy.array([ inf, -inf, 2.19722438])
>>> x = ivy.array([1, 2, -0.9])
>>> z = ivy.logit(x, eps=0.2)
>>> print(z)
ivy.array([ 1.38629448, 1.38629448, -1.38629436])
"""
return current_backend(x).logit(x, eps=eps, out=out)
logit.jax_like = _logit_jax_like
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@inputs_to_ivy_arrays
def prelu(
x: Union[ivy.NativeArray, ivy.Array],
slope: Union[float, ivy.NativeArray, ivy.Array],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Prelu takes input data (Array) and slope array as input,
and produces one output data (array) where the function
f(x) = slope * x for x < 0, f(x) = x for x >= 0., is applied
to the data array elementwise. This operator supports unidirectional
broadcasting (array slope should be unidirectional broadcastable to
input tensor X);
Parameters
----------
x
Input Array.
slope
        Slope Array. The shape of slope can be smaller than the input X;
        if so, its shape must be unidirectionally broadcastable to X.
out
Optional output array.
Returns
-------
ret
Array containing Parametrized relu values.
"""
    try:
        return ivy.where(x > 0, x, x * slope, out=out)
    except ivy.utils.exceptions.IvyError as error:
        if len(slope.shape) == 1:
            dim = slope.shape[0]
            # place the slope along the single axis whose size matches it,
            # with singleton dimensions everywhere else, so it broadcasts
            new_shape = [d if d == dim else 1 for d in x.shape]
            if new_shape.count(dim) == 1:
                xs = x * ivy.reshape(slope, new_shape)
                return ivy.where(x > 0, x, xs, out=out)
        raise ivy.utils.exceptions.IvyError(
            f"The shape {slope.shape} is not unidirectionally broadcastable "
            "to the input as per ONNX standards"
        ) from error
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def thresholded_relu(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
threshold: Union[int, float] = 0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply the rectified linear unit function with custom threshold.
Parameters
----------
x
input array
threshold
threshold value above which the activation is linear. Default: ``0``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
        an array containing the rectified linear unit activation of each
        element in ``x``, with custom ``threshold``.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([-1., 0., 1.])
>>> y = ivy.thresholded_relu(x, threshold=0.5)
>>> print(y)
    ivy.array([0., 0., 1.])
>>> x = ivy.array([1.5, 0.7, -2.4])
>>> y = ivy.zeros(3)
>>> ivy.thresholded_relu(x, threshold=1, out = y)
>>> print(y)
ivy.array([ 1.5, 0., 0.])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1.0, -1.2]), b=ivy.array([0.2, 0.6]))
>>> x = ivy.thresholded_relu(x, threshold=0.5)
>>> print(x)
{
a: ivy.array([1., 0.]),
b: ivy.array([0., 0.6])
}
"""
return current_backend(x).thresholded_relu(x, threshold=threshold, out=out)
def _relu6_jax_like(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
fn_original=None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
return ivy.where(
ivy.logical_or(
ivy.real(x) < 0, ivy.logical_and(ivy.real(x) == 0, ivy.imag(x) < 0)
),
ivy.array(0, dtype=x.dtype),
ivy.where(
ivy.logical_or(
ivy.real(x) > 6, ivy.logical_and(ivy.real(x) == 6, ivy.imag(x) > 0)
),
ivy.array(6, dtype=x.dtype),
x,
),
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
@handle_complex_input
def relu6(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply the rectified linear unit 6 function element-wise.
Parameters
----------
x
input array
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the rectified linear unit 6 activation of each element in
``x``.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([-1., 0., 1., 2., 3., 4., 5., 6., 7.])
>>> y = ivy.relu6(x)
>>> print(y)
ivy.array([0., 0., 1., 2., 3., 4., 5., 6., 6.])
>>> x = ivy.array([-1., 0., 1., 2., 3., 4., 5., 6., 7.])
>>> y = ivy.zeros(9)
>>> ivy.relu6(x, out = y)
>>> print(y)
ivy.array([0., 0., 1., 2., 3., 4., 5., 6., 6.])
"""
return current_backend(x).relu6(x, out=out)
relu6.jax_like = _relu6_jax_like
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
@handle_complex_input
def logsigmoid(
input: Union[ivy.NativeArray, ivy.Array],
/,
*,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply element-wise Log-sigmoid of x.
    logsigmoid(x) = log(1 / (1 + exp(-x))).
Parameters
----------
input
Input array.
    complex_mode
        optional specifier for how to handle complex data types. See
        ``ivy.func_wrapper.handle_complex_input`` for more detail.
    out
        optional output array, for writing the result to. It must have a shape
        that the input broadcasts to.
Returns
-------
Array with same shape as input with Log-sigmoid applied to every element.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([-1., 0., 1.])
>>> z = x.logsigmoid()
>>> print(z)
ivy.array([-1.31326175, -0.69314718, -0.31326169])
>>> x = ivy.array([1.5, 0.7, -2.4])
>>> z = x.logsigmoid()
>>> print(z)
ivy.array([-0.20141329, -0.40318608, -2.48683619])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1.0, -1.2]), b=ivy.array([0.2, 0.6]))
>>> x = ivy.logsigmoid(x)
>>> print(x)
{
a: ivy.array([-0.31326169, -1.46328247]),
b: ivy.array([-0.59813893, -0.43748799])
}
"""
return ivy.current_backend(input).logsigmoid(input, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def selu(
x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None
) -> ivy.Array:
"""Apply the scaled exponential linear unit function element-wise.
Parameters
----------
x
input array
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the scaled exponential linear unit activation of each
element in ``x``.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([-1., 0., 1., 2., 3., 4., 5., 6., 7.])
>>> y = ivy.selu(x)
>>> print(y)
ivy.array([-1.11133075, 0. , 1.05070102, 2.10140204, 3.15210295,
4.20280409, 5.25350523, 6.30420589, 7.35490704])
>>> x = ivy.array([-1., 0., 1., 2., 3., 4., 5., 6., 7.])
>>> y = ivy.zeros(9)
>>> ivy.selu(x, out = y)
>>> print(y)
ivy.array([-1.11133075, 0. , 1.05070102, 2.10140204, 3.15210295,
4.20280409, 5.25350523, 6.30420589, 7.35490704])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([-3., -2., -1., 0., 1., 2., 3., 4., 5.]),
... b=ivy.array([1., 2., 3., 4., 5., 6., 7., 8., 9.])
... )
>>> x = ivy.selu(x, out=x)
>>> print(x)
{
a: ivy.array([-1.6705687, -1.52016652, -1.11133075, 0., 1.05070102,
2.10140204, 3.15210295, 4.20280409, 5.25350523]),
b: ivy.array([1.05070102, 2.10140204, 3.15210295, 4.20280409, 5.25350523,
6.30420589, 7.35490704, 8.40560818, 9.45630932])
}
"""
return current_backend(x).selu(x, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def silu(
x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None
) -> ivy.Array:
"""Apply the silu function element-wise.
Parameters
----------
x
input array.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the silu activation of each element in ``x``.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([-1.0, 1.0, 2.0])
>>> y = ivy.silu(x)
>>> print(y)
ivy.array([-0.2689, 0.7310, 1.7615])
>>> x = ivy.array([-1.0, 1.0, 2.0])
>>> y = x.silu()
>>> print(y)
ivy.array([-0.2689, 0.7310, 1.7615])
>>> x = ivy.array([[-1.3, 3.8, 2.1], [1.7, 4.2, -6.6]])
>>> y = ivy.silu(x)
>>> print(y)
ivy.array([[-0.2784, 3.7168, 1.8708], [ 1.4374, 4.1379, -0.0089]])
"""
return current_backend(x).silu(x, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
def elu(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
alpha: float = 1.0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply the elu unit function element-wise.
Parameters
----------
x
Input array.
alpha
        scale factor controlling the slope of the function for x <= 0. Default: ``1.0``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The input array with elu applied element-wise.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0.39, -0.85])
>>> y = ivy.elu(x)
>>> print(y)
ivy.array([ 0.38999999, -0.57258511])
>>> x = ivy.array([1.5, 0.7, -2.4])
>>> y = ivy.zeros(3)
>>> ivy.elu(x, out=y)
>>> print(y)
ivy.array([ 1.5, 0.69999999, -0.90928203])
>>> x = ivy.array([[1.1, 2.2, 3.3],
... [-4.4, -5.5, -6.6]])
>>> ivy.elu(x, out=x)
>>> print(x)
ivy.array([[ 1.10000002, 2.20000005, 3.29999995],
[-0.98772264, -0.99591321, -0.99863964]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> x = ivy.elu(x, out=x)
>>> print(x)
{
a: ivy.array([0., -0.69880581]),
b: ivy.array([0.40000001, -0.18126924])
}
"""
return current_backend(x).elu(x, alpha=alpha, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
def hardtanh(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
max_val: float = 1,
min_val: float = -1,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply the hardtanh unit function element-wise.
Parameters
----------
x
Input array.
min_val
minimum value of the linear region range. Default: -1.
max_val
maximum value of the linear region range. Default: 1.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
        The input array with hardtanh applied element-wise.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0.39, -0.85])
>>> y = ivy.hardtanh(x)
>>> print(y)
ivy.array([ 0.39, -0.85])
>>> x = ivy.array([1.5, 0.7, -2.4])
>>> y = ivy.zeros(3)
>>> ivy.hardtanh(x, out=y)
>>> print(y)
ivy.array([ 1., 0.7, -1.])
>>> x = ivy.array([[1.1, 2.2, 3.3],[-0.4, 0.5, -6.6]])
>>> ivy.hardtanh(x, out=x)
>>> print(x)
ivy.array([[ 1., 1., 1.],[-0.4, 0.5, -1.]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> x = ivy.hardtanh(x, out=x)
>>> print(x)
{
a: ivy.array([0., -1.]),
b: ivy.array([0.4, -0.2])
}
"""
return current_backend(x).hardtanh(x, max_val=max_val, min_val=min_val, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
def tanhshrink(
x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None
) -> ivy.Array:
"""Apply the tanhshrink function element-wise.
Parameters
----------
x
input array.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the tanhshrink activation of each element in ``x``.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([-1.0, 1.0, 2.0])
>>> y = ivy.tanhshrink(x)
>>> print(y)
ivy.array([-0.23840582, 0.23840582, 1.03597236])
>>> x = ivy.array([-1.0, 1.0, 2.0])
>>> y = x.tanhshrink()
>>> print(y)
ivy.array([-0.23840582, 0.23840582, 1.03597236])
>>> x = ivy.array([[-1.3, 3.8, 2.1], [1.7, 4.2, -6.6]])
>>> y = ivy.tanhshrink(x)
>>> print(y)
ivy.array([[-0.43827677, 2.80100036, 1.12954807],
[ 0.76459098, 3.20044947, -5.60000372]])
"""
return current_backend(x).tanhshrink(x, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
def softshrink(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
lambd: float = 0.5,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply the softshrink function element-wise.
Parameters
----------
x
input array.
lambd
        the lambda value for the Softshrink formulation. Default: 0.5.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the softshrink activation of each element in ``x``.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([-1.0, 1.0, 2.0])
>>> y = ivy.softshrink(x)
>>> print(y)
ivy.array([-0.5, 0.5, 1.5])
>>> x = ivy.array([-1.0, 1.0, 2.0])
>>> y = x.softshrink()
>>> print(y)
ivy.array([-0.5, 0.5, 1.5])
>>> x = ivy.array([[-1.3, 3.8, 2.1], [1.7, 4.2, -6.6]])
>>> y = ivy.softshrink(x)
>>> print(y)
ivy.array([[-0.79999995, 3.29999995, 1.59999991],
[ 1.20000005, 3.69999981, -6.0999999 ]])
"""
return current_backend(x).softshrink(x, lambd=lambd, out=out)
def _celu_jax_like(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
fn_original: Optional[Callable] = None,
alpha: float = 1.0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
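    # Note (an assumed convention, mirroring jax.nn.celu for complex inputs):
    # the computation below is
    #     celu(x) = max(0, x) + alpha * expm1(min(0, x) / alpha)
    # where max/min compare complex numbers lexicographically by (real, imag).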
# implementation of max(0, x) for complex numbers
complex_max = ivy.where(
(
ivy.logical_or(
ivy.real(x) < 0, ivy.logical_and(ivy.real(x) == 0, ivy.imag(x) < 0)
)
),
ivy.astype(0.0, x.dtype),
x,
)
# implementation of min(0, x) for complex numbers
complex_min = ivy.where(
(
ivy.logical_or(
ivy.real(x) < 0, ivy.logical_and(ivy.real(x) == 0, ivy.imag(x) < 0)
)
),
x,
ivy.astype(0.0, x.dtype),
)
return complex_max + alpha * ivy.expm1(complex_min / alpha)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def threshold(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
threshold: float,
value: float,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply the threshold function element-wise.
Parameters
----------
x
input array.
threshold
The value to threshold at.
value
        The value to replace thresholded elements with.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the threshold activation of each element in ``x``.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([-1.0, 1.0, 2.0])
>>> y = ivy.threshold(x,value=0.0, threshold=1.5)
>>> print(y)
ivy.array([0., 0., 2.])
>>> x = ivy.array([-1.0, 1.0, 2.0])
    >>> y = x.threshold(value=0.0, threshold=1.5)
>>> print(y)
ivy.array([0., 0., 2.])
>>> x = ivy.array([[-1.3, 3.8, 2.1], [1.7, 4.2, -6.6]])
>>> y = ivy.threshold(x, value=0.0, threshold=1.5)
>>> print(y)
ivy.array([[0. , 3.79999995, 2.0999999 ],
[1.70000005, 4.19999981, 0. ]])
"""
return current_backend(x).threshold(x, threshold=threshold, value=value, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
@handle_complex_input
def celu(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
alpha: float = 1.0,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply the Continuously Differentiable Exponential Linear Unit (CELU)
activation function to each element of the input.
Parameters
----------
x
Input array.
alpha
The alpha value (negative slope) for the CELU formulation. Default is ``1.0``
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The input array with celu applied element-wise.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0.39, -0.85])
>>> y = ivy.celu(x)
>>> y
ivy.array([ 0.39, -0.57])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0.39, -0.85]), b=ivy.array([1., -0.2]))
>>> y = ivy.celu(x)
>>> y
{
a: ivy.array([0.38999999, -0.57]),
b: ivy.array([1., -0.18])
}
"""
return current_backend(x).celu(x, alpha=alpha, out=out)
celu.jax_like = _celu_jax_like
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
def scaled_tanh(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
alpha: float = 1.7159,
beta: float = 0.67,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the scaled hyperbolic tangent (tanh) activation.
The scaled tanh activation function is defined as:
out = alpha * tanh(beta * x)
Parameters
----------
x
input array.
alpha
The scaling parameter for the output.
Determines the amplitude of the tanh function.
Default: 1.7159
beta
The scaling parameter for the input.
Determines the slope of the tanh function.
Default: 0.67
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The input array after applying the scaled tanh activation.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([22.])
>>> y = ivy.scaled_tanh(x)
>>> y
    ivy.array([1.71589994])
>>> x = ivy.array([4.0, 7.0])
>>> y = ivy.scaled_tanh(x, alpha=1.2, beta=5)
>>> y
ivy.array([1.20000005, 1.20000005])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1.2, -1.2]), b=ivy.array([4.4, -2.2]))
>>> y = ivy.scaled_tanh(x)
>>> y
{
a: ivy.array([1.14324772, -1.14324772]),
b: ivy.array([1.70648694, -1.54488957])
}
>>> x = ivy.Container(a=ivy.array([1.2]), b=ivy.array([4.4]))
>>> y = ivy.scaled_tanh(x, alpha=0.2, beta=0.5)
>>> y
{
a: ivy.array([0.10740992]),
b: ivy.array([0.19514863])
}
"""
return current_backend(x).scaled_tanh(x, alpha=alpha, beta=beta, out=out)
stanh = scaled_tanh
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
def hardshrink(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
lambd: float = 0.5,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply the hardshrink function element-wise.
Parameters
----------
x
input array.
lambd
the value for the Hardshrink formulation.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the hardshrink activation of each element in ``x``.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([-1.0, 1.0, 2.0])
>>> y = ivy.hardshrink(x)
>>> print(y)
ivy.array([-1., 1., 2.])
>>> x = ivy.array([-1.0, 1.0, 2.0])
>>> y = x.hardshrink()
>>> print(y)
ivy.array([-1., 1., 2.])
>>> x = ivy.array([[-1.3, 3.8, 2.1], [1.7, 4.2, -6.6]])
>>> y = ivy.hardshrink(x)
>>> print(y)
ivy.array([[-1.29999995, 3.79999995, 2.0999999 ],
[ 1.70000005, 4.19999981, -6.5999999 ]])
"""
return current_backend(x).hardshrink(x, lambd=lambd, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def hardsilu(
x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None
) -> ivy.Array:
"""Apply the hardsilu/hardswish function element-wise.
Parameters
----------
x
input array
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
    ret
        an array containing the output of the hardsilu/hardswish function
        applied to each element in ``x``.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1., 2., 3.])
>>> y = ivy.hardsilu(x)
>>> print(y)
ivy.array([0.66666669, 1.66666663, 3. ])
>>> x = ivy.array([-2.1241, 1.4897, 4.4090])
>>> y = ivy.zeros(3)
>>> ivy.hardsilu(x, out=y)
>>> print(y)
ivy.array([-0.31008321, 1.1147176 , 4.40899992])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([-0.5, -1, 0]), b=ivy.array([0.5, 1., 2]))
>>> y = ivy.hardsilu(x)
>>> print(y)
{
a: ivy.array([-0.20833333, -0.33333334, 0.]),
b: ivy.array([0.29166666, 0.66666669, 1.66666663])
}
"""
return current_backend(x).hardsilu(x, out=out)
| ivy/ivy/functional/ivy/experimental/activations.py/0 | {
"file_path": "ivy/ivy/functional/ivy/experimental/activations.py",
"repo_id": "ivy",
"token_count": 12306
} | 43 |
from typing import Optional, Union, Tuple
import ivy
from ivy.func_wrapper import (
handle_out_argument,
to_native_arrays_and_back,
handle_nestable,
handle_device,
handle_backend_invalid,
)
from ivy.utils.exceptions import handle_exceptions
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def unravel_index(
indices: Union[ivy.Array, ivy.NativeArray],
shape: Tuple[int],
/,
*,
out: Optional[ivy.Array] = None,
) -> Tuple[ivy.Array]:
"""Convert a flat index or array of flat indices into a tuple of coordinate
arrays.
Parameters
----------
indices
Input array.
shape
The shape of the array to use for unraveling indices.
out
optional output array, for writing the result to.
Returns
-------
ret
Tuple with arrays of type int32 that have the same shape as the indices array.
Examples
--------
>>> indices = ivy.array([22, 41, 37])
>>> ivy.unravel_index(indices, (7,6))
(ivy.array([3, 6, 6]), ivy.array([4, 5, 1]))
"""
return ivy.current_backend(indices).unravel_index(indices, shape, out=out)
| ivy/ivy/functional/ivy/experimental/searching.py/0 | {
"file_path": "ivy/ivy/functional/ivy/experimental/searching.py",
"repo_id": "ivy",
"token_count": 471
} | 44 |
# global
from numbers import Number
from typing import Union, Optional, Tuple
# local
import ivy
from ivy.utils.backend import current_backend
from ivy.utils.exceptions import handle_exceptions
from ivy.func_wrapper import (
handle_array_function,
to_native_arrays_and_back,
handle_out_argument,
handle_nestable,
handle_array_like_without_promotion,
handle_device,
handle_backend_invalid,
)
# Array API Standard #
# -------------------#
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def argmax(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[int] = None,
keepdims: bool = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
select_last_index: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the indices of the maximum values along a specified axis. When
the maximum value occurs multiple times, only the indices corresponding to
the first occurrence are returned.
Parameters
----------
x
input array. Should have a numeric data type.
axis
axis along which to search. If None, the function must return the index of the
maximum value of the flattened array. Default = None.
keepdims
If this is set to True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will broadcast correctly
against the array.
dtype
Optional data type of the output array.
select_last_index
If this is set to True, the index corresponding to the
last occurrence of the maximum value will be returned
out
If provided, the result will be inserted into this array. It should be of the
appropriate shape and dtype.
Returns
-------
ret
if axis is None, a zero-dimensional array containing the index of the first
occurrence of the maximum value; otherwise, a non-zero-dimensional array
        containing the indices of the maximum values. The returned array must
        have the default array index data type.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.argmax.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([-0., 1., -1.])
>>> y = ivy.argmax(x)
>>> print(y)
    ivy.array(1)
>>> x = ivy.array([-0., 1., -1.])
    >>> z = ivy.zeros((), dtype=ivy.int64)
>>> ivy.argmax(x, out=z)
>>> print(z)
ivy.array(1)
>>> x = ivy.array([[1., -0., -1.], [-2., 3., 2.]])
>>> y = ivy.argmax(x, axis=1)
>>> print(y)
ivy.array([0, 1])
>>> x = ivy.array([[4., 0., -1.], [2., -3., 6]])
>>> y = ivy.argmax(x, axis=1, keepdims=True)
>>> print(y)
ivy.array([[0], [2]])
>>> x = ivy.array([[4., 0., -1.], [2., -3., 6]])
>>> y = ivy.argmax(x, axis=1, dtype=ivy.int64)
>>> print(y, y.dtype)
ivy.array([0, 2]) int64
>>> x = ivy.array([[4., 0., -1.],[2., -3., 6], [2., -3., 6]])
>>> z = ivy.zeros((3,1), dtype=ivy.int64)
>>> y = ivy.argmax(x, axis=1, keepdims=True, out=z)
>>> print(z)
ivy.array([[0],[2],[2]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., -1., 2.]), b=ivy.array([3., 4., 5.]))
>>> y = ivy.argmax(x)
>>> print(y)
{
a: ivy.array(2),
b: ivy.array(2)
}
"""
return current_backend(x).argmax(
x,
axis=axis,
keepdims=keepdims,
dtype=dtype,
select_last_index=select_last_index,
out=out,
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def argmin(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[int] = None,
keepdims: bool = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
select_last_index: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the indices of the minimum values along a specified axis. When
the minimum value occurs multiple times, only the indices corresponding to
the first occurrence are returned.
Parameters
----------
x
input array. Should have a numeric data type.
axis
axis along which to search. If None, the function must return the index of the
minimum value of the flattened array. Default = None.
keepdims
if True, the reduced axes (dimensions) must be included in the result as
singleton dimensions, and, accordingly, the result must be compatible with the
input array (see Broadcasting). Otherwise, if False, the reduced axes
(dimensions) must not be included in the result. Default = False.
dtype
An optional output_dtype from: int32, int64. Defaults to int64.
select_last_index
If this is set to True, the index corresponding to the
        last occurrence of the minimum value will be returned.
out
        if provided, the result will be inserted into this array. It should be
        of the appropriate shape and dtype.
Returns
-------
ret
Array containing the indices of the minimum values across the specified axis.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.argmin.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0., 1., -1.])
>>> y = ivy.argmin(x)
>>> print(y)
ivy.array(2)
>>> x = ivy.array([[0., 1., -1.],[-2., 1., 2.]])
>>> y = ivy.argmin(x, axis=1)
>>> print(y)
ivy.array([2, 0])
>>> x = ivy.array([[0., 1., -1.],[-2., 1., 2.]])
>>> y = ivy.argmin(x, axis=1, keepdims=True)
>>> print(y)
ivy.array([[2],
[0]])
>>> x = ivy.array([[0., 1., -1.],[-2., 1., 2.],[1., -2., 0.]])
>>> y= ivy.zeros((3,1), dtype=ivy.int64)
>>> ivy.argmin(x, axis=1, keepdims=True, out=y)
>>> print(y)
ivy.array([[2],
[0],
[1]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., -1., 2.]), b=ivy.array([3., 4., 5.]))
>>> y = ivy.argmin(x)
>>> print(y)
{
a: ivy.array(1),
b: ivy.array(0)
}
"""
return current_backend(x).argmin(
x,
axis=axis,
keepdims=keepdims,
dtype=dtype,
select_last_index=select_last_index,
out=out,
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@to_native_arrays_and_back
@handle_array_function
@handle_device
def nonzero(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
as_tuple: bool = True,
size: Optional[int] = None,
fill_value: Number = 0,
) -> Union[Tuple[ivy.Array], ivy.Array]:
"""Return the indices of the array elements which are non-zero.
.. note::
If ``x`` has a complex floating-point data type, non-zero elements
are those elements having at least one component (real or imaginary)
which is non-zero.
.. note::
        If ``x`` has a boolean data type, non-zero elements are those elements
which are equal to ``True``.
Parameters
----------
x
input array. Must have a positive rank. If `x` is zero-dimensional, the function
must raise an exception.
as_tuple
if True, the output is returned as a tuple of indices, one for each
dimension of the input, containing the indices of the true elements in that
dimension. If False, the coordinates are returned in a (N, ndim) array,
where N is the number of true elements. Default = True.
size
if specified, the function will return an array of shape (size, ndim).
If the number of non-zero elements is fewer than size, the remaining elements
will be filled with fill_value. Default = None.
fill_value
when size is specified and there are fewer than size number of elements,
the remaining elements in the output array will be filled with fill_value.
Default = 0.
Returns
-------
ret
a tuple of `k` arrays, one for each dimension of `x` and each of size `n`
(where `n` is the total number of non-zero elements), containing the indices of
the non-zero elements in that dimension. The indices must be returned in
row-major, C-style order. The returned array must have the default array index
data type.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.nonzero.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0, 10, 15, 20, -50, 0])
>>> y = ivy.nonzero(x)
>>> print(y)
(ivy.array([1, 2, 3, 4]),)
>>> x = ivy.array([[1, 2], [-1, -2]])
>>> y = ivy.nonzero(x)
>>> print(y)
(ivy.array([0, 0, 1, 1]), ivy.array([0, 1, 0, 1]))
>>> x = ivy.array([[0, 2], [-1, -2]])
>>> y = ivy.nonzero(x, as_tuple=False)
>>> print(y)
ivy.array([[0, 1], [1, 0], [1, 1]])
>>> x = ivy.array([0, 1])
>>> y = ivy.nonzero(x, size=2, fill_value=4)
>>> print(y)
(ivy.array([1, 4]),)
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([[10, 20], [10, 0], [0, 0]])
>>> y = ivy.nonzero(x)
>>> print(y)
(ivy.array([0, 0, 1]), ivy.array([0, 1, 0]))
>>> x = ivy.native_array([[0], [1], [1], [0], [1]])
>>> y = ivy.nonzero(x)
>>> print(y)
(ivy.array([1, 2, 4]), ivy.array([0, 0, 0]))
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0,1,2,3,0]), b=ivy.array([1,1, 0,0]))
>>> y = ivy.nonzero(x)
>>> print(y)
[{
a: ivy.array([1, 2, 3]),
b: ivy.array([0, 1])
}]
Instance Method Examples
~~~~~~~~~~~~~~~~~~~~~~~~
With :class:`ivy.Array` instance method:
>>> x = ivy.array([0,0,0,1,1,1])
>>> y = x.nonzero()
>>> print(y)
(ivy.array([3, 4, 5]),)
With :class:`ivy.Container` instance method:
>>> x = ivy.Container(a=ivy.array([1,1,1]), b=ivy.native_array([0]))
>>> y = x.nonzero()
>>> print(y)
[{
a: ivy.array([0, 1, 2]),
b: ivy.array([])
}]
"""
return current_backend(x).nonzero(
x, as_tuple=as_tuple, size=size, fill_value=fill_value
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def where(
condition: Union[ivy.Array, ivy.NativeArray],
x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return elements chosen from x or y depending on condition.
Parameters
----------
condition
Where True, yield x1, otherwise yield x2.
x1
values from which to choose when condition is True.
x2
values from which to choose when condition is False.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
An array with elements from x1 where condition is True, and elements from x2
elsewhere.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.where.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> condition = ivy.array([[True, False], [True, True]])
>>> x1 = ivy.array([[1, 2], [3, 4]])
>>> x2 = ivy.array([[5, 6], [7, 8]])
>>> res = ivy.where(condition, x1, x2)
>>> print(res)
ivy.array([[1, 6],
[3, 4]])
>>> x1 = ivy.array([[6, 13, 22, 7, 12], [7, 11, 16, 32, 9]])
>>> x2 = ivy.array([[44, 20, 8, 35, 9], [98, 23, 43, 6, 13]])
>>> res = ivy.where(((x1 % 2 == 0) & (x2 % 2 == 1)), x1, x2)
>>> print(res)
ivy.array([[44, 20, 8, 35, 12],
[98, 23, 16, 6, 13]])
With :class:`ivy.Container` input:
>>> x1 = ivy.Container(a=ivy.array([3, 1, 5]), b=ivy.array([2, 4, 6]))
>>> x2 = ivy.Container(a=ivy.array([0, 7, 2]), b=ivy.array([3, 8, 5]))
>>> condition = x1.a > x2.a
>>> res = x1.where(condition, x2)
>>> print(res)
{
a: ivy.array([1, 0, 1]),
b: ivy.array([1, 0, 1])
}
"""
return current_backend(x1).where(condition, x1, x2, out=out)
# Extra #
# ------#
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def argwhere(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the indices of all non-zero elements of the input array.
Parameters
----------
x
input array, for which indices are desired.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
Indices of non-zero elements.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[1, 2], [3, 4]])
>>> res = ivy.argwhere(x)
>>> print(res)
ivy.array([[0, 0], [0, 1], [1, 0], [1, 1]])
>>> x = ivy.array([[0, 2], [3, 4]])
>>> res = ivy.argwhere(x)
>>> print(res)
ivy.array([[0, 1], [1, 0], [1, 1]])
>>> x = ivy.array([[0, 2], [3, 4]])
>>> y = ivy.zeros((3, 2), dtype=ivy.int64)
>>> res = ivy.argwhere(x, out=y)
>>> print(res)
ivy.array([[0, 1], [1, 0], [1, 1]])
With a :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1, 2]), b=ivy.array([3, 4]))
>>> res = ivy.argwhere(x)
>>> print(res)
{
a: ivy.array([[0], [1]]),
b: ivy.array([[0], [1]])
}
>>> x = ivy.Container(a=ivy.array([1, 0]), b=ivy.array([3, 4]))
>>> res = ivy.argwhere(x)
>>> print(res)
{
a: ivy.array([[0]]),
b: ivy.array([[0], [1]])
}
"""
return current_backend(x).argwhere(x, out=out)
| ivy/ivy/functional/ivy/searching.py/0 | {
"file_path": "ivy/ivy/functional/ivy/searching.py",
"repo_id": "ivy",
"token_count": 6701
} | 45 |
from . import backend
from . import dynamic_import
from .dynamic_import import *
from .binaries import *
| ivy/ivy/utils/__init__.py/0 | {
"file_path": "ivy/ivy/utils/__init__.py",
"repo_id": "ivy",
"token_count": 28
} | 46 |
# TODO should this still be here?
import termcolor
level = 0
def cprint(message, color="green"):
print(termcolor.colored(message, color))
| ivy/ivy/utils/verbosity.py/0 | {
"file_path": "ivy/ivy/utils/verbosity.py",
"repo_id": "ivy",
"token_count": 47
} | 47 |
"""A state holder for testing, this is only intended to hold and store testing
data to be used by the test helpers to prune unsupported data.
Should not be used inside any of the test functions.
"""
from dataclasses import dataclass
from .pipeline_helper import get_frontend_config
# needed for multiversion
available_frameworks = [
"numpy",
"jax",
"tensorflow",
"torch",
"paddle",
"mxnet",
"scipy",
]
mod_frontend = {
"tensorflow": None,
"numpy": None,
"jax": None,
"torch": None,
"mindspore": None,
"scipy": None,
"paddle": None,
} # multiversion
mod_backend = {
"numpy": None,
"jax": None,
"tensorflow": None,
"torch": None,
"paddle": None,
"mxnet": None,
} # multiversion
# This is used to make sure the variable is not being overridden
_Notsetval = object()
CURRENT_GROUND_TRUTH_BACKEND: callable = _Notsetval
CURRENT_BACKEND: callable = _Notsetval
CURRENT_FRONTEND: callable = _Notsetval
CURRENT_FRONTEND_CONFIG = _Notsetval
CURRENT_RUNNING_TEST = _Notsetval
CURRENT_DEVICE = _Notsetval
CURRENT_DEVICE_STRIPPED = _Notsetval
CURRENT_FRONTEND_STR = None
CURRENT_TRACED_DATA = {}
@dataclass(frozen=True) # ToDo use kw_only=True when version is updated
class TestData:
test_fn: callable
fn_tree: str
fn_name: str
supported_device_dtypes: dict = None
is_method: bool = False
class InterruptedTest(BaseException):
"""Indicate that a test tried to write global attributes while a test is
running."""
def __init__(self, test_interrupted):
super().__init__(f"{test_interrupted} was interrupted during execution.")
# Setup
def setup_api_test(
backend: str,
ground_truth_backend: str,
device: str,
test_data: TestData = None,
):
if test_data is not None:
_set_test_data(test_data)
if ground_truth_backend is not None:
_set_ground_truth_backend(ground_truth_backend)
_set_backend(backend)
_set_device(device)
def teardown_api_test():
_unset_test_data()
_unset_ground_truth_backend()
_unset_backend()
_unset_device()
def setup_frontend_test(frontend: str, backend: str, device: str, test_data: TestData):
if test_data is not None:
_set_test_data(test_data)
_set_frontend(frontend)
_set_backend(backend)
_set_device(device)
def teardown_frontend_test():
_unset_test_data()
_unset_frontend()
_unset_backend()
_unset_device()
def _set_test_data(test_data: TestData):
global CURRENT_RUNNING_TEST
if CURRENT_RUNNING_TEST is not _Notsetval:
raise InterruptedTest(CURRENT_RUNNING_TEST)
CURRENT_RUNNING_TEST = test_data
def _set_frontend(framework: str):
global CURRENT_FRONTEND
global CURRENT_FRONTEND_CONFIG
if CURRENT_FRONTEND is not _Notsetval:
raise InterruptedTest(CURRENT_RUNNING_TEST)
CURRENT_FRONTEND_CONFIG = get_frontend_config(framework)
CURRENT_FRONTEND = framework
def _set_backend(framework: str):
global CURRENT_BACKEND
if CURRENT_BACKEND is not _Notsetval:
raise InterruptedTest(CURRENT_RUNNING_TEST)
CURRENT_BACKEND = framework
def _set_ground_truth_backend(framework: str):
global CURRENT_GROUND_TRUTH_BACKEND
if CURRENT_GROUND_TRUTH_BACKEND is not _Notsetval:
raise InterruptedTest(CURRENT_RUNNING_TEST)
CURRENT_GROUND_TRUTH_BACKEND = framework
def _set_device(device: str):
global CURRENT_DEVICE, CURRENT_DEVICE_STRIPPED
if CURRENT_DEVICE is not _Notsetval or CURRENT_DEVICE_STRIPPED is not _Notsetval:
raise InterruptedTest(CURRENT_RUNNING_TEST)
CURRENT_DEVICE = device
CURRENT_DEVICE_STRIPPED = device.partition(":")[0]
# Teardown
def _unset_test_data():
global CURRENT_RUNNING_TEST
CURRENT_RUNNING_TEST = _Notsetval
def _unset_frontend():
global CURRENT_FRONTEND, CURRENT_FRONTEND_CONFIG
CURRENT_FRONTEND = _Notsetval
CURRENT_FRONTEND_CONFIG = _Notsetval
def _unset_backend():
global CURRENT_BACKEND
CURRENT_BACKEND = _Notsetval
def _unset_ground_truth_backend():
global CURRENT_GROUND_TRUTH_BACKEND
CURRENT_GROUND_TRUTH_BACKEND = _Notsetval
def _unset_device():
global CURRENT_DEVICE, CURRENT_DEVICE_STRIPPED
CURRENT_DEVICE = _Notsetval
CURRENT_DEVICE_STRIPPED = _Notsetval
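# Illustrative pairing (a sketch, not executed here; the argument values are
# hypothetical): the test helpers are expected to wrap every test roughly as
# follows, so that the globals above are always reset to _Notsetval.
#
#     setup_api_test("torch", "tensorflow", "cpu:0", test_data)
#     try:
#         ...  # run the test body against the globals set above
#     finally:
#         teardown_api_test()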
| ivy/ivy_tests/test_ivy/helpers/globals.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/helpers/globals.py",
"repo_id": "ivy",
"token_count": 1760
} | 48 |
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test, BackendHandler
from ivy.functional.frontends.jax import vmap
from hypothesis import strategies as st
import jax
# --- Helpers --- #
# --------------- #
def _fn1(x, y):
return ivy.matmul(x, y)
def _fn2(x, y):
return ivy.vecdot(x, y)
def _fn3(x, y):
return ivy.add(x, y)
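# Illustrative semantics (a sketch; shapes are hypothetical): with in_axes=0,
# vmap(_fn1) maps ivy.matmul over the leading axis, e.g. inputs of shapes
# (5, 3, 4) and (5, 4, 2) produce an output of shape (5, 3, 2), one matmul
# per leading slice.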
# --- Main --- #
# ------------ #
# device_get
@handle_frontend_test(
fn_tree="jax.general_functions.device_get",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_jax_device_get(
*,
dtype_and_x,
test_flags,
fn_tree,
frontend,
backend_fw,
on_device,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
dtype, x = dtype_and_x
dtype = dtype[0]
x = x[0]
x = ivy_backend.asarray(x)
if test_flags.as_variable and ivy_backend.is_float_dtype(dtype):
x = ivy_backend.functional.ivy.gradients._variable(x)
x_on_dev = ivy_backend.functional.frontends.jax.device_get(x).ivy_array
dev_from_new_x = ivy_backend.dev(x_on_dev)
# value test
assert dev_from_new_x == "cpu"
# device_put
@handle_frontend_test(
fn_tree="jax.general_functions.device_put",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_jax_device_put(
*,
dtype_and_x,
test_flags,
fn_tree,
frontend,
backend_fw,
on_device,
):
with BackendHandler.update_backend(backend_fw) as ivy_backend:
dtype, x = dtype_and_x
dtype = dtype[0]
x = x[0]
x = ivy_backend.asarray(x)
if test_flags.as_variable and ivy_backend.is_float_dtype(dtype):
x = ivy_backend.functional.ivy.gradients._variable(x)
device = ivy_backend.dev(x)
x_on_dev = ivy_backend.functional.frontends.jax.device_put(
x, on_device
).ivy_array
dev_from_new_x = ivy_backend.dev(x_on_dev)
# value test
assert dev_from_new_x == device
# vmap
@handle_frontend_test(
fn_tree="jax.general_functions.vmap",
func=st.sampled_from([_fn1, _fn2, _fn3]),
dtype_and_arrays_and_axes=helpers.arrays_and_axes(
allow_none=False,
min_num_dims=2,
max_num_dims=5,
min_dim_size=2,
max_dim_size=10,
num=2,
return_dtype=True,
),
in_axes_as_cont=st.booleans(),
)
def test_jax_vmap(
func,
dtype_and_arrays_and_axes,
in_axes_as_cont,
backend_fw,
):
dtype, generated_arrays, in_axes = dtype_and_arrays_and_axes
ivy.set_backend(backend_fw)
arrays = [ivy.native_array(array) for array in generated_arrays]
if in_axes_as_cont:
vmapped_func = vmap(func, in_axes=in_axes, out_axes=0)
else:
vmapped_func = vmap(func, in_axes=0, out_axes=0)
assert callable(vmapped_func)
    # Run under the tested backend; a failure is recorded as None so it can be
    # matched against the ground-truth jax run below.
    try:
fw_res = helpers.flatten_and_to_np(
ret=vmapped_func(*arrays), backend=backend_fw
)
fw_res = fw_res if len(fw_res) else None
except Exception:
fw_res = None
ivy.previous_backend()
ivy.set_backend("jax")
arrays = [ivy.native_array(array) for array in generated_arrays]
if in_axes_as_cont:
jax_vmapped_func = jax.vmap(func, in_axes=in_axes, out_axes=0)
else:
jax_vmapped_func = jax.vmap(func, in_axes=0, out_axes=0)
assert callable(jax_vmapped_func)
try:
jax_res = helpers.flatten_and_to_np(
ret=jax_vmapped_func(*arrays), backend="jax"
)
jax_res = jax_res if len(jax_res) else None
except Exception:
jax_res = None
ivy.previous_backend()
if fw_res is not None and jax_res is not None:
helpers.value_test(
ret_np_flat=fw_res,
ret_np_from_gt_flat=jax_res,
rtol=1e-1,
atol=1e-1,
backend=backend_fw,
ground_truth_backend="jax",
)
elif fw_res is None and jax_res is None:
pass
else:
assert False, "One of the results is None while other isn't"
| ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_general_functions.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_general_functions.py",
"repo_id": "ivy",
"token_count": 2139
} | 49 |
# global
from hypothesis import strategies as st, assume
import numpy as np
import hypothesis.extra.numpy as nph
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers
from ivy_tests.test_ivy.test_functional.test_core.test_manipulation import (
_repeat_helper,
)
from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_manipulation import ( # noqa
_get_dtype_values_k_axes_for_rot90,
_get_splits,
_st_tuples_or_int,
)
# --- Helpers --- #
# --------------- #
# concatenate
@st.composite
def _arrays_idx_n_dtypes(draw):
num_dims = draw(st.shared(helpers.ints(min_value=1, max_value=4), key="num_dims"))
num_arrays = draw(
st.shared(helpers.ints(min_value=2, max_value=4), key="num_arrays")
)
common_shape = draw(
helpers.list_of_size(
x=helpers.ints(min_value=2, max_value=3),
size=num_dims - 1,
)
)
unique_idx = draw(helpers.ints(min_value=0, max_value=num_dims - 1))
unique_dims = draw(
helpers.list_of_size(
x=helpers.ints(min_value=2, max_value=3),
size=num_arrays,
)
)
xs = []
input_dtypes = draw(
helpers.array_dtypes(available_dtypes=draw(helpers.get_dtypes("valid")))
)
for ud, dt in zip(unique_dims, input_dtypes):
x = draw(
helpers.array_values(
shape=common_shape[:unique_idx] + [ud] + common_shape[unique_idx:],
dtype=dt,
)
)
xs.append(x)
return xs, input_dtypes, unique_idx
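# Illustrative draw (hypothetical values): with num_dims=3 and unique_idx=1,
# _arrays_idx_n_dtypes can yield shapes (2, 2, 3) and (2, 3, 3), which differ
# only along axis 1 and therefore concatenate along that axis.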
@st.composite
def _get_clip_inputs(draw):
shape = draw(
helpers.get_shape(
min_num_dims=1, max_num_dims=5, min_dim_size=2, max_dim_size=10
)
)
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
shape=shape,
min_value=-1e10,
max_value=1e10,
)
)
min = draw(st.booleans())
if min:
max = draw(st.booleans())
min = draw(
helpers.array_values(
dtype=x_dtype[0], shape=shape, min_value=-50, max_value=5
)
)
max = (
draw(
helpers.array_values(
dtype=x_dtype[0], shape=shape, min_value=6, max_value=50
)
)
if max
else None
)
else:
min = None
max = draw(
helpers.array_values(
dtype=x_dtype[0], shape=shape, min_value=6, max_value=50
)
)
return x_dtype, x, min, max
# block
@st.composite
def _get_input_and_block(draw):
shapes = draw(
st.lists(
helpers.get_shape(
min_num_dims=1, max_num_dims=5, min_dim_size=2, max_dim_size=10
),
min_size=2,
max_size=10,
)
)
x_dtypes, xs = zip(
*[
draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=2,
max_dim_size=10,
shape=shape,
)
)
for shape in shapes
]
)
return x_dtypes, xs
# broadcast_to
@st.composite
def _get_input_and_broadcast_shape(draw):
dim1 = draw(helpers.ints(min_value=2, max_value=5))
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=2,
max_dim_size=10,
shape=(dim1,),
)
)
broadcast_dim = draw(helpers.ints(min_value=1, max_value=3))
shape = ()
for _ in range(broadcast_dim):
shape += (draw(helpers.ints(min_value=1, max_value=dim1)),)
shape += (dim1,)
return x_dtype, x, shape
# resize
@st.composite
def _get_input_and_new_shape(draw):
shape = draw(
helpers.get_shape(
min_num_dims=2, max_num_dims=5, min_dim_size=2, max_dim_size=10
)
)
new_shape = draw(
helpers.get_shape(
min_num_dims=2, max_num_dims=5, min_dim_size=2, max_dim_size=10
).filter(lambda x: np.prod(x) == np.prod(shape))
)
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=2,
max_num_dims=5,
min_dim_size=2,
max_dim_size=10,
shape=shape,
)
)
return x_dtype, x, new_shape
# reshape
@st.composite
def _get_input_and_reshape(draw):
shape = draw(
helpers.get_shape(
min_num_dims=2, max_num_dims=5, min_dim_size=2, max_dim_size=10
)
)
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=2,
max_dim_size=10,
shape=shape,
)
)
new_shape = shape[1:] + (shape[0],)
return x_dtype, x, new_shape
# swapaxes
@st.composite
def _get_input_and_two_swapabble_axes(draw):
x_dtype, x, x_shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
ret_shape=True,
min_num_dims=1,
max_num_dims=10,
)
)
axis1 = draw(
helpers.ints(
min_value=-1 * len(x_shape),
max_value=len(x_shape) - 1,
)
)
axis2 = draw(
helpers.ints(
min_value=-1 * len(x_shape),
max_value=len(x_shape) - 1,
)
)
return x_dtype, x, axis1, axis2
# pad
@st.composite
def _pad_helper(draw):
mode = draw(
st.sampled_from(
[
"constant",
"edge",
"linear_ramp",
"maximum",
"mean",
"median",
"minimum",
"reflect",
"symmetric",
"wrap",
]
)
)
if mode == "median":
dtypes = "float"
else:
dtypes = "numeric"
dtype, input, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(dtypes),
ret_shape=True,
min_num_dims=1,
min_value=-100,
max_value=100,
).filter(
lambda x: x[0][0] not in ["float16", "bfloat16", "complex64", "complex128"]
),
)
ndim = len(shape)
pad_width = draw(_st_tuples_or_int(ndim, min_val=0))
kwargs = {}
if mode in ["reflect", "symmetric"]:
kwargs["reflect_type"] = draw(st.sampled_from(["even", "odd"]))
if mode in ["maximum", "mean", "median", "minimum"]:
kwargs["stat_length"] = draw(_st_tuples_or_int(ndim, min_val=2))
if mode in ["linear_ramp"]:
kwargs["end_values"] = draw(_st_tuples_or_int(ndim))
if mode == "constant":
kwargs["constant_values"] = draw(_st_tuples_or_int(ndim))
return dtype, input[0], pad_width, kwargs, mode
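# Illustrative draw (hypothetical values): mode="constant" with a 2-D input
# may produce pad_width=((1, 0), (2, 2)) and kwargs={"constant_values": 0},
# mirroring the argument forms accepted by numpy.pad and jax.numpy.pad.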
# TODO: uncomment when block is reimplemented
# @handle_frontend_test(
# fn_tree="jax.numpy.block",
# input_x_shape=_get_input_and_block(),
# test_with_out=st.just(False),
# )
# def test_jax_block(
# *,
# input_x_shape,
# on_device,
# fn_tree,
# frontend,
# test_flags,
# ):
# x_dtypes, xs = input_x_shape
# helpers.test_frontend_function(
# input_dtypes=x_dtypes,
# frontend=frontend,
# test_flags=test_flags,
# fn_tree=fn_tree,
# on_device=on_device,
# arrays=xs,
# )
@st.composite
def _squeeze_helper(draw):
shape = draw(st.shared(helpers.get_shape(), key="shape"))
valid_axes = [idx for idx in range(len(shape)) if shape[idx] == 1] + [None]
return draw(st.sampled_from(valid_axes))
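# Illustrative draw: for a shared shape of (3, 1, 2), valid_axes is [1, None],
# since only size-1 dimensions may be squeezed.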
# --- Main --- #
# ------------ #
# append
@handle_frontend_test(
fn_tree="jax.numpy.append",
dtype_values_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
shape=helpers.get_shape(
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
shared_dtype=True,
valid_axis=True,
allow_neg_axes=True,
force_int_axis=True,
),
test_with_out=st.just(False),
)
def test_jax_append(
dtype_values_axis,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, values, axis = dtype_values_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
arr=values[0],
values=values[1],
axis=axis,
)
# array_split
@handle_frontend_test(
fn_tree="jax.numpy.array_split",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
),
indices_or_sections=_get_splits(
min_num_dims=1, allow_none=False, is_mod_split=True
),
axis=st.shared(
helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
force_int=True,
),
key="target_axis",
),
test_with_out=st.just(False),
)
def test_jax_array_split(
*,
dtype_value,
indices_or_sections,
axis,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, value = dtype_value
assume(isinstance(indices_or_sections, int))
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
ary=value[0],
indices_or_sections=indices_or_sections,
axis=axis,
)
# atleast_1d
@handle_frontend_test(
fn_tree="jax.numpy.atleast_1d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=helpers.ints(min_value=1, max_value=10),
),
test_with_out=st.just(False),
)
def test_jax_atleast_1d(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, arrays = dtype_and_x
arys = {}
for i, (array, idtype) in enumerate(zip(arrays, input_dtype)):
arys[f"arrs{i}"] = np.asarray(array, dtype=idtype)
test_flags.num_positional_args = len(arys)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**arys,
)
# atleast_2d
@handle_frontend_test(
fn_tree="jax.numpy.atleast_2d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=helpers.ints(min_value=1, max_value=10),
),
test_with_out=st.just(False),
)
def test_jax_atleast_2d(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, arrays = dtype_and_x
arys = {}
for i, (array, idtype) in enumerate(zip(arrays, input_dtype)):
arys[f"arrs{i}"] = np.asarray(array, dtype=idtype)
test_flags.num_positional_args = len(arys)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**arys,
)
# atleast_3d
@handle_frontend_test(
fn_tree="jax.numpy.atleast_3d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=helpers.ints(min_value=1, max_value=10),
),
test_with_out=st.just(False),
)
def test_jax_atleast_3d(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, arrays = dtype_and_x
arys = {}
for i, (array, idtype) in enumerate(zip(arrays, input_dtype)):
arys[f"arrs{i}"] = np.asarray(array, dtype=idtype)
test_flags.num_positional_args = len(arys)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**arys,
)
# bartlett
@handle_frontend_test(
fn_tree="jax.numpy.bartlett",
m=helpers.ints(min_value=0, max_value=20),
)
def test_jax_bartlett(
m,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
helpers.test_frontend_function(
input_dtypes=["int64"],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
M=m,
)
# blackman
@handle_frontend_test(
fn_tree="jax.numpy.blackman",
m=helpers.ints(min_value=0, max_value=20),
)
def test_jax_blackman(
m,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
helpers.test_frontend_function(
input_dtypes=["int64"],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
M=m,
)
# broadcast_arrays
@handle_frontend_test(
fn_tree="jax.numpy.broadcast_arrays",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=helpers.ints(min_value=1, max_value=10),
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_broadcast_arrays(
*,
dtype_value,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, value = dtype_value
arrys = {}
for i, v in enumerate(value):
arrys[f"array{i}"] = v
test_flags.num_positional_args = len(arrys)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**arrys,
)
# broadcast_shapes
@handle_frontend_test(
fn_tree="jax.numpy.broadcast_shapes",
shapes=nph.mutually_broadcastable_shapes(
num_shapes=4, min_dims=1, max_dims=5, min_side=1, max_side=5
),
test_with_out=st.just(False),
)
def test_jax_broadcast_shapes(
*,
shapes,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
shape, _ = shapes
shapes = {f"shape{i}": shape[i] for i in range(len(shape))}
test_flags.num_positional_args = len(shapes)
ret, frontend_ret = helpers.test_frontend_function(
input_dtypes=["int64"],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**shapes,
test_values=False,
)
assert ret == frontend_ret
@handle_frontend_test(
fn_tree="jax.numpy.broadcast_to",
input_x_broadcast=_get_input_and_broadcast_shape(),
test_with_out=st.just(False),
)
def test_jax_broadcast_to(
*,
input_x_broadcast,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
x_dtype, x, shape = input_x_broadcast
helpers.test_frontend_function(
input_dtypes=x_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
array=x[0],
shape=shape,
)
# clip
@handle_frontend_test(
fn_tree="jax.numpy.clip",
input_and_ranges=_get_clip_inputs(),
)
def test_jax_clip(
*,
input_and_ranges,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
x_dtype, x, min, max = input_and_ranges
helpers.test_frontend_function(
input_dtypes=x_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
a_min=min,
a_max=max,
)
# column_stack
@handle_frontend_test(
fn_tree="jax.numpy.column_stack",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
),
factor=helpers.ints(min_value=2, max_value=6),
)
def test_jax_column_stack(
dtype_and_x,
factor,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
dtype, x = dtype_and_x
ys = [x[0]]
    for _ in range(factor):
        ys += [x[0]]
helpers.test_frontend_function(
input_dtypes=[dtype[0]] * (factor + 1),
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
tup=ys,
)
@handle_frontend_test(
fn_tree="jax.numpy.concatenate",
xs_n_input_dtypes_n_unique_idx=_arrays_idx_n_dtypes(),
test_with_out=st.just(False),
)
def test_jax_concat(
*,
xs_n_input_dtypes_n_unique_idx,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
xs, input_dtypes, unique_idx = xs_n_input_dtypes_n_unique_idx
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
arrays=xs,
axis=unique_idx,
)
@handle_frontend_test(
fn_tree="jax.numpy.diagflat",
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=helpers.get_shape(
min_num_dims=1, max_num_dims=2, min_dim_size=1, max_dim_size=10
),
small_abs_safety_factor=2.5,
large_abs_safety_factor=2.5,
safety_factor_scale="log",
),
k=st.integers(min_value=-5, max_value=5),
)
def test_jax_diagflat(
dtype_x,
k,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
v=x[0],
k=k,
)
# dsplit
@handle_frontend_test(
fn_tree="jax.numpy.dsplit",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=3), key="value_shape"),
),
indices_or_sections=_get_splits(
min_num_dims=3, axis=2, allow_none=False, is_mod_split=True
),
test_with_out=st.just(False),
)
def test_jax_dsplit(
*,
dtype_value,
indices_or_sections,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, value = dtype_value
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
ary=value[0],
indices_or_sections=indices_or_sections,
)
# dstack
@handle_frontend_test(
fn_tree="jax.numpy.dstack",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shared_dtype=True,
num_arrays=helpers.ints(min_value=1, max_value=10),
shape=helpers.get_shape(
min_num_dims=1,
),
),
test_with_out=st.just(False),
)
def test_jax_dstack(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
tup=x,
)
# expand_dims
@handle_frontend_test(
fn_tree="jax.numpy.expand_dims",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=2,
max_dim_size=10,
force_int_axis=True,
valid_axis=True,
),
)
def test_jax_expand_dims(
*,
dtype_x_axis,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
)
# flip
@handle_frontend_test(
fn_tree="jax.numpy.flip",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
),
axis=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
min_size=1,
max_size=1,
force_int=True,
),
test_with_out=st.just(False),
)
def test_jax_flip(
*,
dtype_value,
axis,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
dtype, value = dtype_value
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
m=value[0],
axis=axis,
)
# fliplr
@handle_frontend_test(
fn_tree="jax.numpy.fliplr",
dtype_and_m=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
),
test_with_out=st.just(False),
)
def test_jax_fliplr(
*,
dtype_and_m,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, m = dtype_and_m
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
m=m[0],
)
# flipud
@handle_frontend_test(
fn_tree="jax.numpy.flipud",
dtype_and_m=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
test_with_out=st.just(False),
)
def test_jax_flipud(
*,
dtype_and_m,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, m = dtype_and_m
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
m=m[0],
)
# hamming
@handle_frontend_test(
fn_tree="jax.numpy.hamming",
m=helpers.ints(min_value=0, max_value=20),
)
def test_jax_hamming(
m,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
helpers.test_frontend_function(
input_dtypes=["int64"],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
M=m,
)
# hanning
@handle_frontend_test(
fn_tree="jax.numpy.hanning",
m=helpers.ints(min_value=0, max_value=20),
)
def test_jax_hanning(
m,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
helpers.test_frontend_function(
input_dtypes=["int64"],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
M=m,
)
# hsplit
@handle_frontend_test(
fn_tree="jax.numpy.hsplit",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=2), key="value_shape"),
),
indices_or_sections=_get_splits(
min_num_dims=2, axis=1, allow_none=False, is_mod_split=True
),
test_with_out=st.just(False),
)
def test_jax_hsplit(
*,
dtype_value,
indices_or_sections,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, value = dtype_value
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
ary=value[0],
indices_or_sections=indices_or_sections,
)
# kaiser
@handle_frontend_test(
fn_tree="jax.numpy.kaiser",
m=helpers.ints(min_value=0, max_value=100),
beta=helpers.floats(min_value=-10, max_value=10),
)
def test_jax_kaiser(
m,
beta,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
helpers.test_frontend_function(
input_dtypes=["int64", "float64"],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
M=m,
beta=beta,
)
# moveaxis
@handle_frontend_test(
fn_tree="jax.numpy.moveaxis",
dtype_and_a=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_value=-100,
max_value=100,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
key="a_s_d",
),
),
source=helpers.get_axis(
allow_none=False,
unique=True,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
key="a_s_d",
),
min_size=1,
force_int=True,
),
destination=helpers.get_axis(
allow_none=False,
unique=True,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
key="a_s_d",
),
min_size=1,
force_int=True,
),
test_with_out=st.just(False),
)
def test_jax_moveaxis(
*,
dtype_and_a,
source,
destination,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, a = dtype_and_a
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=a[0],
source=source,
destination=destination,
)
# trim_zeros
@handle_frontend_test(
fn_tree="jax.numpy.trim_zeros",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), min_num_dims=1, max_num_dims=1
),
trim=st.sampled_from(["f", "b", "fb"]),
)
def test_jax_numpy_trim_zeros(
frontend,
on_device,
*,
dtype_and_x,
backend_fw,
trim,
fn_tree,
test_flags,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
backend_to_test=backend_fw,
filt=x[0],
trim=trim,
)
@handle_frontend_test(
fn_tree="jax.numpy.pad",
dtype_and_input_and_other=_pad_helper(),
test_with_out=st.just(False),
)
def test_jax_pad(
*,
dtype_and_input_and_other,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
(
dtype,
input,
pad_width,
kwargs,
mode,
) = dtype_and_input_and_other
if isinstance(pad_width, int):
pad_width = ((pad_width, pad_width),) * input.ndim
else:
pad_width = tuple(
tuple(pair) if isinstance(pair, list) else pair for pair in pad_width
)
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
array=input,
pad_width=pad_width,
mode=mode,
**kwargs,
)
# ravel
@handle_frontend_test(
fn_tree="jax.numpy.ravel",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=2,
max_dim_size=10,
shape=helpers.get_shape(
min_num_dims=2, max_num_dims=5, min_dim_size=2, max_dim_size=10
),
),
order=st.sampled_from(["C", "F"]),
test_with_out=st.just(False),
)
def test_jax_ravel(
*,
dtype_and_values,
order,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
input_dtypes, x = dtype_and_values
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
order=order,
)
# repeat
@handle_frontend_test(
fn_tree="jax.numpy.repeat",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
),
axis=st.shared(
st.one_of(
st.none(),
helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
max_size=1,
),
),
key="axis",
),
repeat=st.one_of(st.integers(1, 10), _repeat_helper()),
test_with_out=st.just(False),
)
def test_jax_repeat(
*,
dtype_value,
axis,
repeat,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
value_dtype, value = dtype_value
if not isinstance(repeat, int):
repeat_dtype, repeat_list = repeat
repeat = repeat_list[0]
value_dtype += repeat_dtype
if not isinstance(axis, int) and axis is not None:
axis = axis[0]
helpers.test_frontend_function(
input_dtypes=value_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=value[0],
repeats=repeat,
axis=axis,
)
@handle_frontend_test(
fn_tree="jax.numpy.reshape",
input_x_shape=_get_input_and_reshape(),
order=st.sampled_from(["C", "F"]),
test_with_out=st.just(False),
)
def test_jax_reshape(
*,
input_x_shape,
order,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
x_dtype, x, shape = input_x_shape
helpers.test_frontend_function(
input_dtypes=x_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
newshape=shape,
order=order,
)
@handle_frontend_test(
fn_tree="jax.numpy.resize",
input_x_shape=_get_input_and_new_shape(),
test_with_out=st.just(True),
)
def test_jax_resize(
*,
input_x_shape,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
x_dtype, x, new_shape = input_x_shape
helpers.test_frontend_function(
input_dtypes=x_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
new_shape=new_shape,
)
# roll
@handle_frontend_test(
fn_tree="jax.numpy.roll",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
),
shift=helpers.dtype_and_values(
available_dtypes=[ivy.int32],
max_num_dims=1,
min_dim_size=st.shared(
helpers.ints(min_value=1, max_value=10),
key="shift_len",
),
max_dim_size=st.shared(
helpers.ints(min_value=1, max_value=10),
key="shift_len",
),
),
axis=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
force_tuple=True,
unique=False,
min_size=st.shared(
helpers.ints(min_value=1, max_value=10),
key="shift_len",
),
max_size=st.shared(
helpers.ints(min_value=1, max_value=10),
key="shift_len",
),
),
test_with_out=st.just(False),
)
def test_jax_roll(
*,
dtype_value,
shift,
axis,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
value_dtype, value = dtype_value
shift_dtype, shift_val = shift
if shift_val[0].ndim == 0: # If shift is an int
shift_val = shift_val[0] # Drop shift's dtype (always int32)
axis = axis[0] # Extract an axis value from the tuple
else:
# Drop shift's dtype (always int32) and convert list to tuple
shift_val = tuple(shift_val[0].tolist())
helpers.test_frontend_function(
input_dtypes=value_dtype + shift_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=value[0],
shift=shift_val,
axis=axis,
)
# rot90
@handle_frontend_test(
fn_tree="jax.numpy.rot90",
dtype_m_k_axes=_get_dtype_values_k_axes_for_rot90(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
test_with_out=st.just(False),
)
def test_jax_rot90(
*,
dtype_m_k_axes,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, m, k, axes = dtype_m_k_axes
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
m=m,
k=k,
axes=tuple(axes),
)
# row_stack
@handle_frontend_test(
fn_tree="jax.numpy.row_stack",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
),
factor=helpers.ints(min_value=2, max_value=6),
)
def test_jax_row_stack(
dtype_and_x,
factor,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
dtype, x = dtype_and_x
xs = [x[0]]
    for _ in range(factor):
        xs += [x[0]]
helpers.test_frontend_function(
input_dtypes=[dtype[0]] * (factor + 1),
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
tup=xs,
)
# split
@handle_frontend_test(
fn_tree="jax.numpy.split",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
),
indices_or_sections=_get_splits(
min_num_dims=1, allow_none=False, is_mod_split=True
),
axis=st.shared(
helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
force_int=True,
),
key="target_axis",
),
test_with_out=st.just(False),
)
def test_jax_split(
*,
dtype_value,
indices_or_sections,
axis,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, value = dtype_value
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
ary=value[0],
indices_or_sections=indices_or_sections,
axis=axis,
)
# squeeze
@handle_frontend_test(
fn_tree="jax.numpy.squeeze",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(), key="shape"),
),
axis=_squeeze_helper(),
test_with_out=st.just(False),
)
def test_jax_squeeze(
*,
dtype_and_values,
axis,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, values = dtype_and_values
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=values[0],
axis=axis,
)
# stack
@handle_frontend_test(
fn_tree="jax.numpy.stack",
dtype_values_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=st.shared(helpers.ints(min_value=2, max_value=4), key="num_arrays"),
shape=helpers.get_shape(min_num_dims=1),
shared_dtype=True,
valid_axis=True,
allow_neg_axes=True,
force_int_axis=True,
),
dtype=helpers.get_dtypes("valid", full=False),
)
def test_jax_stack(
dtype_values_axis,
dtype,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, values, axis = dtype_values_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
arrays=values,
axis=axis,
)
@handle_frontend_test(
fn_tree="jax.numpy.swapaxes",
input_x_axis1_axis2=_get_input_and_two_swapabble_axes(),
test_with_out=st.just(False),
)
def test_jax_swapaxes(
*,
input_x_axis1_axis2,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
x_dtype, x, axis1, axis2 = input_x_axis1_axis2
helpers.test_frontend_function(
input_dtypes=x_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis1=axis1,
axis2=axis2,
)
# take
@handle_frontend_test(
fn_tree="jax.numpy.take",
dtype_indices_axis=helpers.array_indices_axis(
array_dtypes=helpers.get_dtypes("numeric"),
indices_dtypes=["int32", "int64"],
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
indices_same_dims=True,
),
)
def test_jax_take(
*,
dtype_indices_axis,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtypes, value, indices, axis, _ = dtype_indices_axis
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=value,
indices=indices,
axis=axis,
)
# tile
@handle_frontend_test(
fn_tree="jax.numpy.tile",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
),
repeat=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("signed_integer"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape").map(
lambda rep: (len(rep),)
),
min_value=0,
max_value=10,
),
test_with_out=st.just(False),
)
def test_jax_tile(
*,
dtype_value,
repeat,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
dtype, value = dtype_value
repeat_dtype, repeat_list = repeat
helpers.test_frontend_function(
input_dtypes=dtype + repeat_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
A=value[0],
reps=repeat_list[0],
)
# transpose
@handle_frontend_test(
fn_tree="jax.numpy.transpose",
array_and_axes=np_frontend_helpers._array_and_axes_permute_helper(
min_num_dims=0,
max_num_dims=5,
min_dim_size=0,
max_dim_size=10,
),
test_with_out=st.just(False),
)
def test_jax_transpose(
*,
array_and_axes,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
array, dtype, axes = array_and_axes
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=array,
axes=axes,
)
# tri
@handle_frontend_test(
fn_tree="jax.numpy.tri",
rows=helpers.ints(min_value=3, max_value=10),
cols=helpers.ints(min_value=3, max_value=10),
k=helpers.ints(min_value=-10, max_value=10),
dtype=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_jax_tri(
rows,
cols,
k,
dtype,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
N=rows,
M=cols,
k=k,
dtype=dtype[0],
)
# tril
@handle_frontend_test(
fn_tree="jax.numpy.tril",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
k=helpers.ints(min_value=-10, max_value=10),
test_with_out=st.just(False),
)
def test_jax_tril(
*,
dtype_and_x,
k,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
m=x[0],
k=k,
)
# vsplit
@handle_frontend_test(
fn_tree="jax.numpy.vsplit",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=2), key="value_shape"),
),
indices_or_sections=_get_splits(
min_num_dims=2, axis=0, allow_none=False, is_mod_split=True
),
test_with_out=st.just(False),
)
def test_jax_vsplit(
*,
dtype_value,
indices_or_sections,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, value = dtype_value
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
ary=value[0],
indices_or_sections=indices_or_sections,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_manipulations.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_manipulations.py",
"repo_id": "ivy",
"token_count": 23436
} | 50 |
# global
from hypothesis import strategies as st
import numpy as np
# local
import ivy
from ivy.functional.frontends.numpy import broadcast
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
@st.composite
def _broadcastable_arrays(draw):
num_of_array = draw(st.integers(1, 3))
shapes = draw(helpers.mutually_broadcastable_shapes(num_shapes=num_of_array))
xs = []
for i in range(num_of_array):
xs.append(
draw(
helpers.array_values(dtype=helpers.get_dtypes("valid"), shape=shapes[i])
)
)
return xs
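# Illustrative draw (hypothetical values): num_of_array=2 may yield mutually
# broadcastable shapes such as (3, 1) and (1, 4), which numpy.broadcast
# combines into a broadcast object of shape (3, 4).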
# --- Main --- #
# ------------ #
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
args=_broadcastable_arrays(),
)
def test_numpy_broadcast_method_reset(args):
ret = broadcast(*args)
ret_gt = np.broadcast(*args)
    # exhaust both iterators so that the subsequent reset() is meaningful
    for _ in zip(ret, ret_gt):
        pass
ret.reset()
ret_gt.reset()
assert ret.index == ret_gt.index
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
args=_broadcastable_arrays(),
)
def test_numpy_broadcast_property_index(args):
ret = broadcast(*args)
ret_gt = np.broadcast(*args)
assert ret.index == ret_gt.index
for _ in zip(ret, ret_gt):
assert ret.index == ret_gt.index
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
args=_broadcastable_arrays(),
)
def test_numpy_broadcast_property_iters(args):
ret = list(map(list, broadcast(*args).iters))
ret_gt = np.array(list(map(list, np.broadcast(*args).iters)))
assert ivy.all(ret == ret_gt)
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
args=_broadcastable_arrays(),
)
def test_numpy_broadcast_property_nd(args):
ret = broadcast(*args)
ret_gt = np.broadcast(*args)
assert ret.nd == ret_gt.nd
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
args=_broadcastable_arrays(),
)
def test_numpy_broadcast_property_ndim(args):
ret = broadcast(*args)
ret_gt = np.broadcast(*args)
assert ret.ndim == ret_gt.ndim
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
args=_broadcastable_arrays(),
)
def test_numpy_broadcast_property_numiter(args):
ret = broadcast(*args)
ret_gt = np.broadcast(*args)
assert ret.numiter == ret_gt.numiter
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
args=_broadcastable_arrays(),
)
def test_numpy_broadcast_property_shape(args):
ret = broadcast(*args)
ret_gt = np.broadcast(*args)
assert ret.shape == ret_gt.shape
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
args=_broadcastable_arrays(),
)
def test_numpy_broadcast_property_size(args):
ret = broadcast(*args)
ret_gt = np.broadcast(*args)
assert ret.size == ret_gt.size
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_broadcast/test_methods.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_broadcast/test_methods.py",
"repo_id": "ivy",
"token_count": 1200
} | 51 |
# global
import numpy as np
from hypothesis import strategies as st
from numpy import triu, tril
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
# unravel_index
@st.composite
def max_value_as_shape_prod(draw):
shape = draw(
helpers.get_shape(
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
)
)
dtype_and_x = draw(
helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_value=0,
max_value=np.prod(shape) - 1,
)
)
return dtype_and_x, shape
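# Illustrative draw: shape=(2, 3) bounds the generated flat indices to
# [0, 5], so every drawn index can be unravelled into a valid (row, col)
# pair by numpy.unravel_index.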
@handle_frontend_test(
fn_tree="numpy.diag_indices",
n=helpers.ints(min_value=1, max_value=10),
ndim=helpers.ints(min_value=2, max_value=10),
dtype=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_numpy_diag_indices(
n,
ndim,
dtype,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
n=n,
ndim=ndim,
)
@handle_frontend_test(
fn_tree="numpy.indices",
dimensions=helpers.get_shape(min_num_dims=1),
dtype=helpers.get_dtypes(kind="float", full=False),
sparse=st.booleans(),
test_with_out=st.just(False),
)
def test_numpy_indices(
*,
dimensions,
dtype,
sparse,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
dimensions=dimensions,
dtype=dtype[0],
sparse=sparse,
)
@handle_frontend_test(
fn_tree="numpy.mask_indices",
n=helpers.ints(min_value=3, max_value=10),
mask_func=st.sampled_from([triu, tril]),
k=helpers.ints(min_value=-5, max_value=5),
input_dtype=helpers.get_dtypes("numeric"),
test_with_out=st.just(False),
number_positional_args=st.just(2),
)
def test_numpy_mask_indices(
n,
mask_func,
k,
input_dtype,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
n=n,
mask_func=mask_func,
k=k,
)
@handle_frontend_test(
fn_tree="numpy.tril_indices",
n=helpers.ints(min_value=1, max_value=10),
m=helpers.ints(min_value=1, max_value=10),
k=st.integers(min_value=-10, max_value=10),
test_with_out=st.just(False),
)
def test_numpy_tril_indices(
*,
n,
m,
k,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
helpers.test_frontend_function(
input_dtypes=["int32"],
test_flags=test_flags,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
n=n,
k=k,
m=m,
)
@handle_frontend_test(
fn_tree="numpy.tril_indices_from",
dtype_and_values=helpers.dtype_and_values(
dtype=["float32"],
min_dim_size=3,
max_dim_size=3,
min_num_dims=2,
max_num_dims=2,
array_api_dtypes=True,
),
k=st.integers(min_value=-10, max_value=10),
dtype=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_numpy_tril_indices_from(
*,
dtype_and_values,
k,
dtype,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
dtype, values = dtype_and_values
helpers.test_frontend_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
arr=values[0],
k=k,
)
@handle_frontend_test(
fn_tree="numpy.unravel_index",
dtype_x_shape=max_value_as_shape_prod(),
test_with_out=st.just(False),
)
def test_numpy_unravel_index(
*,
dtype_x_shape,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
dtype_and_x, shape = dtype_x_shape
input_dtype, x = dtype_and_x[0], dtype_and_x[1]
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
indices=x[0],
shape=shape,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_indexing_routines/test_generating_index_arrays.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_indexing_routines/test_generating_index_arrays.py",
"repo_id": "ivy",
"token_count": 2529
} | 52 |
# local
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# logical_and
@handle_frontend_test(
fn_tree="numpy.logical_and",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=("bool",),
num_arrays=2,
)
],
special=True,
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="logical_and"
),
)
def test_numpy_logical_and(
dtypes_values_casting,
where,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[1],
out=None,
where=where,
casting=casting,
order="K",
dtype="bool",
subok=True,
)
# logical_not
@handle_frontend_test(
fn_tree="numpy.logical_not",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=("bool",),
)
],
special=True,
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="logical_not"
),
)
def test_numpy_logical_not(
dtypes_values_casting,
where,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, x, casting, _ = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype="bool",
subok=True,
)
# logical_or
@handle_frontend_test(
fn_tree="numpy.logical_or",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=("bool",),
num_arrays=2,
)
],
special=True,
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="logical_or"
),
)
def test_numpy_logical_or(
dtypes_values_casting,
where,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[1],
out=None,
where=where,
casting=casting,
order="K",
dtype="bool",
subok=True,
)
# logical_xor
@handle_frontend_test(
fn_tree="numpy.logical_xor",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=("bool",),
num_arrays=2,
)
],
special=True,
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="logical_xor"
),
)
def test_numpy_logical_xor(
dtypes_values_casting,
where,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[1],
out=None,
where=where,
casting=casting,
order="K",
dtype="bool",
subok=True,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_logic/test_logical_operations.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_logic/test_logical_operations.py",
"repo_id": "ivy",
"token_count": 2664
} | 53 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
@st.composite
def _pad_helper(draw):
mode = draw(
st.sampled_from(
[
"constant",
"edge",
"linear_ramp",
"maximum",
"mean",
"median",
"minimum",
"reflect",
"symmetric",
"wrap",
]
)
)
if mode in ["median", "mean"]:
dtypes = "float"
else:
dtypes = "numeric"
    dtype, values, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(dtypes),
ret_shape=True,
min_num_dims=1,
min_value=-100,
max_value=100,
).filter(
lambda x: x[0][0] not in ["float16", "bfloat16", "complex64", "complex128"]
),
)
ndim = len(shape)
pad_width = draw(_st_tuples_or_int(ndim, min_val=0))
kwargs = {}
if mode in ["reflect", "symmetric"]:
kwargs["reflect_type"] = draw(st.sampled_from(["even", "odd"]))
if mode in ["maximum", "mean", "median", "minimum"]:
kwargs["stat_length"] = draw(_st_tuples_or_int(ndim, min_val=2))
if mode in ["linear_ramp"]:
kwargs["end_values"] = draw(_st_tuples_or_int(ndim))
if mode == "constant":
kwargs["constant_values"] = draw(_st_tuples_or_int(ndim))
    return dtype, values[0], pad_width, kwargs, mode
def _st_tuples_or_int(n_pairs, min_val=0):
return st.one_of(
st_tuples(
st.tuples(
st.integers(min_value=min_val, max_value=4),
st.integers(min_value=min_val, max_value=4),
),
min_size=n_pairs,
max_size=n_pairs,
),
helpers.ints(min_value=min_val, max_value=4),
)
# --- Main --- #
# ------------ #
def st_tuples(elements, *, min_size=0, max_size=None, unique_by=None, unique=False):
return st.lists(
elements,
min_size=min_size,
max_size=max_size,
unique_by=unique_by,
unique=unique,
).map(tuple)
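# A minimal sketch of how these composite strategies can be inspected
# interactively (``.example()`` is a real Hypothesis debugging aid, but its
# output varies run to run and it must not be called inside tests):
# >>> _st_tuples_or_int(2).example()  # doctest: +SKIP
# ((1, 3), (0, 4))
# >>> _pad_helper().example()  # doctest: +SKIP
# (['int32'], array(...), ((2, 0),), {'constant_values': 1}, 'constant')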
# pad
@handle_frontend_test(
fn_tree="numpy.pad",
args=_pad_helper(),
test_with_out=st.just(False),
)
def test_numpy_pad(
*,
args,
fn_tree,
backend_fw,
on_device,
test_flags,
frontend,
):
dtype, x, pad_width, kwargs, mode = args
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test="numpy",
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
array=x,
pad_width=pad_width,
mode=mode,
**kwargs,
)
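# Illustrative sketch (assumes ``import numpy as np``): the reference
# behaviour of ``np.pad`` that this test compares the frontend against.
# >>> np.pad(np.array([1, 2, 3]), (1, 2), mode="constant", constant_values=0)
# array([0, 1, 2, 3, 0, 0])
# >>> np.pad(np.array([1, 2, 3]), (0, 2), mode="edge")
# array([1, 2, 3, 3, 3])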
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_padding_arrays.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_padding_arrays.py",
"repo_id": "ivy",
"token_count": 1523
} | 54 |
# global
import numpy as np
from hypothesis import strategies as st, assume
# local
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
# trapz
@st.composite
def _either_x_dx(draw):
    # Randomly choose whether trapz gets sample points ``x`` (rand == 0) or a
    # scalar spacing ``dx`` (rand == 1); drawing a bare int (not a 1-tuple)
    # makes the ``rand == 0`` comparisons below meaningful.
    rand = draw(st.integers(min_value=0, max_value=1))
if rand == 0:
either_x_dx = draw(
helpers.dtype_and_values(
available_dtypes=st.shared(
helpers.get_dtypes("float"), key="trapz_dtype"
),
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
)
)
return rand, either_x_dx
else:
either_x_dx = draw(
st.floats(min_value=-10, max_value=10),
)
return rand, either_x_dx
# helpers
@st.composite
def _get_castable_dtypes_values(draw, *, allow_nan=False, use_where=False):
    # Draw an array, an axis, and a dtype the array can safely be cast to;
    # returns ([dtype1], [values], axis, dtype2) plus a ``where`` mask when
    # requested.
available_dtypes = helpers.get_dtypes("numeric")
shape = draw(helpers.get_shape(min_num_dims=1, max_num_dims=4, max_dim_size=6))
dtype, values = draw(
helpers.dtype_and_values(
available_dtypes=available_dtypes,
num_arrays=1,
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
shape=shape,
allow_nan=allow_nan,
)
)
axis = draw(helpers.get_axis(shape=shape, force_int=True))
dtype1, values, dtype2 = draw(
helpers.get_castable_dtype(draw(available_dtypes), dtype[0], values[0])
)
if use_where:
where = draw(np_frontend_helpers.where(shape=shape))
return [dtype1], [values], axis, dtype2, where
return [dtype1], [values], axis, dtype2
# --- Main --- #
# ------------ #
# cumprod
@handle_frontend_test(
fn_tree="numpy.cumprod",
dtype_x_axis_dtypes=_get_castable_dtypes_values(),
)
def test_numpy_cumprod(
dtype_x_axis_dtypes,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, axis, dtype = dtype_x_axis_dtypes
# ToDo: set as_variable_flags as the parameter generated by test_cumprod once
# this issue is marked as completed https://github.com/pytorch/pytorch/issues/75733
if backend_fw == "torch":
assume(not test_flags.as_variable[0])
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
dtype=dtype,
)
# cumsum
@handle_frontend_test(
fn_tree="numpy.cumsum",
dtype_and_x_axis_dtype=_get_castable_dtypes_values(),
)
def test_numpy_cumsum(
dtype_and_x_axis_dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, axis, dtype = dtype_and_x_axis_dtype
# ToDo: set as_variable_flags as the parameter generated by test_cumprod once
# this issue is marked as completed https://github.com/pytorch/pytorch/issues/75733
if backend_fw == "torch":
assume(not test_flags.as_variable[0])
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
dtype=dtype,
)
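# Reference semantics for the two tests above (assumes ``import numpy as np``):
# >>> np.cumprod([1, 2, 3, 4])
# array([ 1,  2,  6, 24])
# >>> np.cumsum([1, 2, 3, 4])
# array([ 1,  3,  6, 10])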
# diff
@handle_frontend_test(
fn_tree="numpy.diff",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
),
)
def test_numpy_diff(
dtype_x_axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, axis = dtype_x_axis
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
)
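# Reference semantics (assumes ``import numpy as np``): ``diff`` returns the
# differences of adjacent elements along the chosen axis.
# >>> np.diff([1, 2, 4, 7])
# array([1, 2, 3])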
# ediff1d
@handle_frontend_test(
fn_tree="numpy.ediff1d",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), min_num_dims=1, max_num_dims=1
),
to_end=st.one_of(
st.integers(-1, 10), st.lists(st.integers(-1, 10), min_size=1, max_size=10)
),
to_begin=st.one_of(
st.integers(-1, 10), st.lists(st.integers(-1, 10), min_size=1, max_size=10)
),
)
def test_numpy_ediff1d(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
to_end,
to_begin,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
test_flags=test_flags,
ary=x[0],
to_end=to_end,
to_begin=to_begin,
)
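# Reference semantics (assumes ``import numpy as np``): ``ediff1d`` flattens,
# takes adjacent differences, and optionally prepends/appends values.
# >>> np.ediff1d([1, 2, 4, 7], to_begin=-99, to_end=[88, 99])
# array([-99,   1,   2,   3,  88,  99])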
# nancumprod
@handle_frontend_test(
fn_tree="numpy.nancumprod",
dtype_and_x_axis_dtype=_get_castable_dtypes_values(allow_nan=True),
)
def test_numpy_nancumprod(
dtype_and_x_axis_dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, axis, dtype = dtype_and_x_axis_dtype
if backend_fw == "torch":
assume(not test_flags.as_variable[0])
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
dtype=dtype,
)
# nancumsum
@handle_frontend_test(
fn_tree="numpy.nancumsum",
dtype_and_x_axis_dtype=_get_castable_dtypes_values(allow_nan=True),
)
def test_numpy_nancumsum(
dtype_and_x_axis_dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, axis, dtype = dtype_and_x_axis_dtype
if backend_fw == "torch":
assume(not test_flags.as_variable[0])
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
dtype=dtype,
)
# nanprod
@handle_frontend_test(
fn_tree="numpy.nanprod",
dtype_and_x_dtype=_get_castable_dtypes_values(allow_nan=True, use_where=True),
keepdims=st.booleans(),
initial=st.one_of(st.floats(min_value=-100, max_value=100)),
)
def test_numpy_nanprod(
dtype_and_x_dtype,
initial,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
keepdims,
):
input_dtypes, x, axis, dtype, where = dtype_and_x_dtype
if backend_fw == "torch":
assume(not test_flags.as_variable[0])
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
dtype=dtype,
initial=initial,
where=where,
keepdims=keepdims,
)
# nansum
@handle_frontend_test(
fn_tree="numpy.nansum",
dtype_and_x_dtype=_get_castable_dtypes_values(allow_nan=True, use_where=True),
keepdims=st.booleans(),
initial=st.one_of(st.floats(min_value=-100, max_value=100)),
)
def test_numpy_nansum(
dtype_and_x_dtype,
initial,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
keepdims,
):
input_dtypes, x, axis, dtype, where = dtype_and_x_dtype
if backend_fw == "torch":
assume(not test_flags.as_variable[0])
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
dtype=dtype,
initial=initial,
where=where,
keepdims=keepdims,
)
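# Reference semantics for the NaN-aware reductions above (assumes
# ``import numpy as np``): NaNs act as 1 for products and 0 for sums.
# >>> np.nanprod([1.0, np.nan, 2.0])
# 2.0
# >>> np.nansum([1.0, np.nan, 2.0])
# 3.0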
# prod
@handle_frontend_test(
fn_tree="numpy.prod",
dtype_x_axis_dtype=_get_castable_dtypes_values(use_where=True),
keep_dims=st.booleans(),
initial=st.one_of(st.floats(min_value=-100, max_value=100)),
)
def test_numpy_prod(
dtype_x_axis_dtype,
keep_dims,
initial,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, axis, dtype, where = dtype_x_axis_dtype
if backend_fw == "torch":
assume(not test_flags.as_variable[0])
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
dtype=dtype,
keepdims=keep_dims,
initial=initial,
where=where,
)
# sum
@handle_frontend_test(
fn_tree="numpy.sum",
dtype_x_axis_dtype=_get_castable_dtypes_values(use_where=True),
keep_dims=st.booleans(),
initial=st.one_of(st.floats(min_value=-100, max_value=100)),
)
def test_numpy_sum(
dtype_x_axis_dtype,
keep_dims,
initial,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, axis, dtype, where = dtype_x_axis_dtype
if backend_fw == "torch":
assume(not test_flags.as_variable[0])
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
dtype=dtype,
keepdims=keep_dims,
initial=initial,
where=where,
)
@handle_frontend_test(
fn_tree="numpy.trapz",
dtype_values_axis=helpers.dtype_values_axis(
available_dtypes=st.shared(helpers.get_dtypes("float"), key="trapz_dtype"),
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
allow_neg_axes=True,
valid_axis=True,
force_int_axis=True,
),
rand_either=_either_x_dx(),
)
def test_numpy_trapz(
dtype_values_axis,
rand_either,
fn_tree,
frontend,
test_flags,
on_device,
backend_fw,
):
input_dtype, y, axis = dtype_values_axis
rand, either_x_dx = rand_either
if rand == 0:
        dtype_x, x = either_x_dx
        x = np.asarray(x[0], dtype=dtype_x[0])
dx = None
else:
x = None
dx = either_x_dx
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
y=np.asarray(y[0], dtype=input_dtype[0]),
x=x,
dx=dx,
axis=axis,
)
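# Reference semantics for the x/dx branch above (assumes
# ``import numpy as np``): trapz accepts either sample points ``x`` or a
# uniform spacing ``dx``.
# >>> np.trapz([0, 1, 2], dx=0.5)
# 1.0
# >>> np.trapz([0, 1, 2], x=[0, 1, 3])
# 3.5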
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_sums_products_differences.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_sums_products_differences.py",
"repo_id": "ivy",
"token_count": 6115
} | 55 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_helpers
# corrcoef
@handle_frontend_test(
fn_tree="numpy.corrcoef",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
abs_smallest_val=1e-5,
min_num_dims=2,
max_num_dims=2,
min_dim_size=3,
max_dim_size=3,
min_value=-100,
max_value=100,
),
rowvar=st.booleans(),
dtype=helpers.get_dtypes("float", full=False),
)
def test_numpy_corrcoef(
dtype_and_x,
rowvar,
frontend,
test_flags,
fn_tree,
on_device,
dtype,
backend_fw,
):
input_dtypes, x = dtype_and_x
np_helpers.test_frontend_function(
input_dtypes=input_dtypes,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
rowvar=rowvar,
dtype=dtype[0],
backend_to_test=backend_fw,
)
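# Reference semantics (assumes ``import numpy as np``): perfectly
# anti-correlated inputs give -1 off the diagonal.
# >>> np.corrcoef([1, 2, 3], [3, 2, 1])
# array([[ 1., -1.],
#        [-1.,  1.]])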
# correlate
@handle_frontend_test(
fn_tree="numpy.correlate",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
max_num_dims=1,
num_arrays=2,
shared_dtype=True,
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
),
mode=st.sampled_from(["valid", "same", "full"]),
test_with_out=st.just(False),
)
def test_numpy_correlate(
dtype_and_x,
mode,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-3,
atol=1e-3,
a=xs[0],
v=xs[1],
mode=mode,
)
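# Reference semantics for the three modes (assumes ``import numpy as np``;
# these are NumPy's standard cross-correlation results):
# >>> np.correlate([1, 2, 3], [0, 1, 0.5])
# array([3.5])
# >>> np.correlate([1, 2, 3], [0, 1, 0.5], mode="same")
# array([2. , 3.5, 3. ])
# >>> np.correlate([1, 2, 3], [0, 1, 0.5], mode="full")
# array([0.5, 2. , 3.5, 3. , 0. ])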
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_statistics/test_correlating.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_statistics/test_correlating.py",
"repo_id": "ivy",
"token_count": 1125
} | 56 |
# global
from hypothesis import strategies as st
import math
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_manipulation import ( # noqa
_get_dtype_values_k_axes_for_rot90,
)
from ivy_tests.test_ivy.test_frontends.test_torch.test_miscellaneous_ops import (
_get_repeat_interleaves_args,
)
# --- Helpers --- #
# --------------- #
# stack
@st.composite
def _arrays_axis_n_dtypes(draw):
    # Draw several same-shaped arrays of a shared dtype plus a stacking axis.
num_dims = draw(st.shared(helpers.ints(min_value=2, max_value=5), key="num_dims"))
num_arrays = draw(
st.shared(helpers.ints(min_value=2, max_value=4), key="num_arrays")
)
common_shape = draw(
helpers.list_of_size(
x=helpers.ints(min_value=2, max_value=3),
size=num_dims - 1,
)
)
axis = draw(st.sampled_from(list(range(num_dims))))
xs = []
input_dtypes = draw(
helpers.array_dtypes(available_dtypes=draw(helpers.get_dtypes("numeric")))
)
dtype = draw(st.sampled_from(input_dtypes))
for _ in range(num_arrays):
x = draw(
helpers.array_values(
shape=common_shape,
dtype=dtype,
)
)
xs.append(x)
input_dtypes = [dtype] * len(input_dtypes)
return xs, input_dtypes, axis
@st.composite
def _arrays_dim_idx_n_dtypes(draw):
    # Draw two arrays that agree on every dim except ``_dim``, plus an index
    # vector that is valid for the larger of the two along that dim.
num_dims = draw(st.shared(helpers.ints(min_value=1, max_value=4), key="num_dims"))
num_arrays = 2
common_shape = draw(
helpers.lists(
x=helpers.ints(min_value=2, max_value=3),
min_size=num_dims - 1,
max_size=num_dims - 1,
)
)
_dim = draw(helpers.ints(min_value=0, max_value=num_dims - 1))
unique_dims = draw(
helpers.lists(
x=helpers.ints(min_value=2, max_value=3),
min_size=num_arrays,
max_size=num_arrays,
)
)
min_dim = min(unique_dims)
max_dim = max(unique_dims)
_idx = draw(
helpers.array_values(
shape=min_dim,
dtype="int64",
min_value=0,
            max_value=max_dim - 1,  # keep indices in range for the larger dim
exclude_min=False,
)
)
xs = []
# available_input_types = draw(helpers.get_dtypes("integer"))
available_input_types = ["int32", "int64", "float16", "float32", "float64"]
input_dtypes = draw(
helpers.array_dtypes(
available_dtypes=available_input_types,
num_arrays=num_arrays,
shared_dtype=True,
)
)
for ud, dt in zip(unique_dims, input_dtypes):
x = draw(
helpers.array_values(
shape=common_shape[:_dim] + [ud] + common_shape[_dim:],
dtype=dt,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
)
)
xs.append(x)
return xs, input_dtypes, _dim, _idx
# concat
@st.composite
def _arrays_idx_n_dtypes(draw):
num_dims = draw(st.shared(helpers.ints(min_value=1, max_value=4), key="num_dims"))
num_arrays = draw(
st.shared(helpers.ints(min_value=2, max_value=4), key="num_arrays")
)
common_shape = draw(
helpers.list_of_size(
x=helpers.ints(min_value=2, max_value=3),
size=num_dims - 1,
)
)
unique_idx = draw(helpers.ints(min_value=0, max_value=num_dims - 1))
unique_dims = draw(
helpers.list_of_size(
x=helpers.ints(min_value=2, max_value=3),
size=num_arrays,
)
)
xs = []
input_dtypes = draw(
helpers.array_dtypes(available_dtypes=draw(helpers.get_dtypes("valid")))
)
dtype = draw(st.sampled_from(input_dtypes))
for ud in unique_dims:
x = draw(
helpers.array_values(
shape=common_shape[:unique_idx] + [ud] + common_shape[unique_idx:],
dtype=dtype,
)
)
xs.append(x)
input_dtypes = [dtype] * len(input_dtypes)
return xs, input_dtypes, unique_idx
@st.composite
def _broadcast_to_helper(draw):
dtype_and_x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=6,
)
)
dtype, x = dtype_and_x
input_shape = x[0].shape
max_num_dims = 6 - len(input_shape)
shape = draw(helpers.get_shape(max_num_dims=max_num_dims)) + input_shape
return dtype, x, shape
# flip
@st.composite
def _dtype_x_axis(draw, **kwargs):
dtype, x, shape = draw(helpers.dtype_and_values(**kwargs, ret_shape=True))
axis = draw(
st.lists(
helpers.ints(min_value=0, max_value=len(shape) - 1),
min_size=len(shape),
max_size=len(shape),
unique=True,
)
)
return dtype, x, axis
# expand
@st.composite
def _expand_helper(draw):
dtype_and_x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=6,
)
)
dtype, x = dtype_and_x
input_shape = x[0].shape
max_num_dims = 6 - len(input_shape)
shape = draw(helpers.get_shape(max_num_dims=max_num_dims)) + input_shape
return dtype, x, shape
@st.composite
def _gather_helper(draw):
dtype_and_param = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=6,
)
)
dtype_and_indices = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=6,
)
)
dtype, param = dtype_and_param
dtype, indices = dtype_and_indices
return dtype, param, indices
# split
@st.composite
def _split_helper(draw):
dtypes, values, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=2,
max_num_dims=4,
min_dim_size=2,
max_dim_size=4,
ret_shape=True,
)
)
axis = draw(st.sampled_from(range(len(shape))))
    num_eles = shape[axis]
    # an integer ``num_or_sections`` must divide the axis length evenly,
    # so only sample divisors
    splits = [i for i in range(1, num_eles + 1) if num_eles % i == 0]
num_splits = draw(st.sampled_from(splits))
return dtypes, values, num_splits, axis
# squeeze
@st.composite
def _squeeze_helper(draw):
    # Only size-1 axes (or None) are valid targets for squeeze.
shape = draw(st.shared(helpers.get_shape(), key="value_shape"))
valid_axes = []
for index, axis in enumerate(shape):
if axis == 1:
valid_axes.append(index)
valid_axes.insert(0, None)
return draw(st.sampled_from(valid_axes))
# tile
@st.composite
def _tile_helper(draw):
dtype, x, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=4,
min_dim_size=2,
max_dim_size=3,
ret_shape=True,
)
)
repeats = draw(
helpers.list_of_size(
x=helpers.ints(min_value=1, max_value=3),
size=len(shape),
)
)
return dtype, x, repeats
# Helpers #
# ------ #
@st.composite
def dtypes_x_reshape(draw):
    # Draw an array and a second shape with the same total number of elements.
shape = draw(helpers.get_shape(min_num_dims=1))
dtypes, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
shape=shape,
)
)
shape = draw(
helpers.get_shape(min_num_dims=1).filter(
lambda s: math.prod(s) == math.prod(shape)
)
)
return dtypes, x, shape
# --- Main --- #
# ------------ #
# abs
@handle_frontend_test(
fn_tree="paddle.abs",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_abs(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="paddle.broadcast_to",
dtype_x_and_shape=_broadcast_to_helper(),
)
def test_paddle_broadcast_to(
*,
dtype_x_and_shape,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
input_dtype, x, shape = dtype_x_and_shape
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
shape=shape,
)
# cast
@handle_frontend_test(
fn_tree="paddle.cast",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
dtype=helpers.get_dtypes("valid", full=False),
)
def test_paddle_cast(
*,
dtype_and_x,
dtype,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
dtype=dtype[0],
)
@handle_frontend_test(
fn_tree="paddle.concat",
xs_n_input_dtypes_n_unique_idx=_arrays_idx_n_dtypes(),
test_with_out=st.just(False),
)
def test_paddle_concat(
*,
xs_n_input_dtypes_n_unique_idx,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
xs, input_dtypes, unique_idx = xs_n_input_dtypes_n_unique_idx
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs,
axis=unique_idx,
)
@handle_frontend_test(
fn_tree="paddle.expand",
dtype_x_and_shape=_expand_helper(),
)
def test_paddle_expand(
*,
dtype_x_and_shape,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
input_dtype, x, shape = dtype_x_and_shape
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
shape=shape,
)
@handle_frontend_test(
fn_tree="paddle.flip",
dtype_x_axis=_dtype_x_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
min_dim_size=1,
),
test_with_out=st.just(False),
)
def test_paddle_flip(
*,
dtype_x_axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
)
@handle_frontend_test(
fn_tree="paddle.gather",
dtype_param_and_indices=_gather_helper(),
)
def test_paddle_gather(
*,
dtype_param_and_indices,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, param, indices = dtype_param_and_indices
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
param=param[0],
indices=indices[0],
)
# gather_nd
@handle_frontend_test(
fn_tree="paddle.gather_nd",
dtype_x_index=helpers.array_indices_axis(
array_dtypes=helpers.get_dtypes("valid"),
indices_dtypes=["int64"],
min_num_dims=5,
max_num_dims=10,
min_dim_size=1,
max_dim_size=5,
indices_same_dims=False,
),
)
def test_paddle_gather_nd(
*,
dtype_x_index,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
input_dtypes, x, index, _, _ = dtype_x_index
helpers.test_frontend_function(
input_dtypes=input_dtypes,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x,
index=index,
)
@handle_frontend_test(
fn_tree="paddle.index_add",
xs_dtypes_dim_idx=_arrays_dim_idx_n_dtypes(),
)
def test_paddle_index_add(
*,
xs_dtypes_dim_idx,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
xs, input_dtypes, axis, indices = xs_dtypes_dim_idx
    # the array with the larger ``axis`` dim is the one being indexed into
    if xs[0].shape[axis] < xs[1].shape[axis]:
        source, target = xs
    else:
        target, source = xs
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
frontend=frontend,
on_device=on_device,
        x=target,
index=indices,
axis=axis,
value=source,
)
# repeat_interleave
@handle_frontend_test(
fn_tree="paddle.repeat_interleave",
dtype_values_repeats_axis_output_size=_get_repeat_interleaves_args(
available_dtypes=helpers.get_dtypes("numeric"),
valid_axis=True,
max_num_dims=4,
max_dim_size=4,
),
)
def test_paddle_repeat_interleave(
*,
dtype_values_repeats_axis_output_size,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, values, repeats, axis, _ = dtype_values_repeats_axis_output_size
helpers.test_frontend_function(
input_dtypes=[dtype[0][0], dtype[1][0]],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=values[0],
repeats=repeats[0],
axis=axis,
)
# Tests #
# ----- #
# reshape
@handle_frontend_test(
fn_tree="paddle.reshape",
dtypes_x_reshape=dtypes_x_reshape(),
)
def test_paddle_reshape(
*,
dtypes_x_reshape,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, shape = dtypes_x_reshape
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
shape=shape,
)
# roll
@handle_frontend_test(
fn_tree="paddle.roll",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
min_dim_size=2,
),
shift=helpers.ints(min_value=1, max_value=10),
axis=helpers.ints(min_value=-1, max_value=1),
test_with_out=st.just(False),
)
def test_paddle_roll(
*,
dtype_and_x,
shift,
axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
shifts=shift,
axis=axis,
)
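# Reference semantics (a sketch via NumPy's ``np.roll``, whose circular-shift
# behaviour ``paddle.roll`` broadly mirrors; assumes ``import numpy as np``):
# >>> np.roll([1, 2, 3, 4], 1)
# array([4, 1, 2, 3])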
# rot90
@handle_frontend_test(
fn_tree="paddle.rot90",
dtype_m_k_axes=_get_dtype_values_k_axes_for_rot90(
available_dtypes=helpers.get_dtypes(kind="valid"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
)
def test_paddle_rot90(
*,
dtype_m_k_axes,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, m, k, axes = dtype_m_k_axes
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=m,
k=k,
axes=tuple(axes),
)
@handle_frontend_test(
fn_tree="paddle.split",
dt_x_num_splits_axis=_split_helper(),
test_with_out=st.just(False),
)
def test_paddle_split(
*,
dt_x_num_splits_axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, x, num_splits, axis = dt_x_num_splits_axis
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
num_or_sections=num_splits,
axis=axis,
)
@handle_frontend_test(
fn_tree="paddle.squeeze",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(), key="value_shape"),
),
axis=_squeeze_helper(),
)
def test_paddle_squeeze(
*,
dtype_and_x,
axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
)
@handle_frontend_test(
fn_tree="paddle.stack",
_arrays_n_dtypes_axis=_arrays_axis_n_dtypes(),
test_with_out=st.just(False),
)
def test_paddle_stack(
*,
_arrays_n_dtypes_axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
xs, input_dtypes, axis = _arrays_n_dtypes_axis
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs,
axis=axis,
)
# take_along_axis
@handle_frontend_test(
fn_tree="paddle.take_along_axis",
dtype_indices_axis=helpers.array_indices_axis(
array_dtypes=helpers.get_dtypes(kind="valid"),
indices_dtypes=["int64"],
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
indices_same_dims=True,
),
)
def test_paddle_take_along_axis(
*,
dtype_indices_axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, value, indices, axis, _ = dtype_indices_axis
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
arr=value,
indices=indices,
axis=axis,
)
@handle_frontend_test(
fn_tree="paddle.tile",
dt_x_repeats=_tile_helper(),
test_with_out=st.just(False),
)
def test_paddle_tile(
*,
dt_x_repeats,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtypes, x, repeats = dt_x_repeats
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
repeat_times=repeats,
)
@handle_frontend_test(
fn_tree="paddle.tolist",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
test_with_out=st.just(False),
)
def test_paddle_tolist(
*,
dtype_and_x,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
x_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=x_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# unbind
@handle_frontend_test(
fn_tree="paddle.unbind",
dtypes_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=2,
max_num_dims=2,
max_dim_size=1,
),
number_positional_args=st.just(1),
axis=st.integers(-1, 0),
test_with_out=st.just(False),
)
def test_paddle_unbind(
*,
dtypes_values,
axis,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
x_dtype, x = dtypes_values
helpers.test_frontend_function(
input_dtypes=x_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
)
# unstack
@handle_frontend_test(
fn_tree="paddle.unstack",
dtypes_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
max_num_dims=2,
max_dim_size=1,
),
number_positional_args=st.just(1),
axis=st.integers(-1, 0),
test_with_out=st.just(False),
)
def test_paddle_unstack(
*,
dtypes_values,
axis,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
x_dtype, x = dtypes_values
helpers.test_frontend_function(
input_dtypes=x_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_manipulation.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_manipulation.py",
"repo_id": "ivy",
"token_count": 11371
} | 57 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_core.test_statistical import (
_statistical_dtype_values,
)
# mean
@handle_frontend_test(
fn_tree="paddle.mean",
dtype_and_x=_statistical_dtype_values(function="mean"),
keepdim=st.booleans(),
test_with_out=st.just(True),
)
def test_paddle_mean(
*,
dtype_and_x,
keepdim,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x, axis = dtype_and_x[:3]
test_flags.num_positional_args = len(dtype_and_x) - 2
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
input=x[0],
axis=axis,
keepdim=keepdim,
)
# median
@handle_frontend_test(
fn_tree="paddle.median",
dtype_x_and_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
min_value=-1e10,
max_value=1e10,
valid_axis=True,
force_int_axis=True,
),
keepdim=st.booleans(),
)
def test_paddle_median(
dtype_x_and_axis, keepdim, backend_fw, frontend, test_flags, fn_tree
):
input_dtypes, x, axis = dtype_x_and_axis
helpers.test_frontend_function(
input_dtypes=input_dtypes,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
x=x[0],
axis=axis,
keepdim=keepdim,
)
@handle_frontend_test(
fn_tree="paddle.nanmedian",
dtype_x_and_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
min_value=-1e10,
max_value=1e10,
valid_axis=True,
force_int_axis=True,
),
keepdim=st.booleans(),
)
def test_paddle_nanmedian(
dtype_x_and_axis,
keepdim,
frontend,
backend_fw,
test_flags,
fn_tree,
):
input_dtypes, x, axis = dtype_x_and_axis
helpers.test_frontend_function(
input_dtypes=input_dtypes,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
x=x[0],
axis=axis,
keepdim=keepdim,
)
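# Reference semantics (a NumPy sketch of the NaN-ignoring median that
# ``paddle.nanmedian`` is expected to match; assumes ``import numpy as np``):
# >>> np.nanmedian([1.0, np.nan, 3.0])
# 2.0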
# numel
@handle_frontend_test(
fn_tree="paddle.numel",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_numel(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# std
@handle_frontend_test(
fn_tree="paddle.std",
dtype_and_x=_statistical_dtype_values(function="std"),
unbiased=st.booleans(),
keepdim=st.booleans(),
)
def test_paddle_std(
*,
unbiased,
dtype_and_x,
keepdim,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x, axis, _ = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
x=x[0],
axis=axis,
unbiased=unbiased,
keepdim=keepdim,
)
# var
@handle_frontend_test(
fn_tree="paddle.var",
dtype_and_x=_statistical_dtype_values(function="var"),
unbiased=st.booleans(),
keepdim=st.booleans(),
)
def test_paddle_var(
*,
unbiased,
dtype_and_x,
keepdim,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x, axis, _ = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
x=x[0],
axis=axis,
unbiased=unbiased,
keepdim=keepdim,
)
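# Reference semantics for the ``unbiased`` flag (a NumPy sketch via ``ddof``,
# which plays the same role; assumes ``import numpy as np``):
# >>> np.var([1.0, 2.0, 3.0, 4.0])            # biased, divides by n
# 1.25
# >>> np.var([1.0, 2.0, 3.0, 4.0], ddof=1)    # unbiased, divides by n - 1
# 1.6666666666666667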
| ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_stat.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_stat.py",
"repo_id": "ivy",
"token_count": 2201
} | 58 |