From b866b08c54d2c267ecf65b9cc1fd567dade4079c Mon Sep 17 00:00:00 2001 From: Suha Siddiqui Date: Wed, 8 Jun 2022 13:46:05 -0400 Subject: [PATCH] includes classes, fxn, module --- src/meta.json | 2 +- src/views/Home.vue | 50 +++++++++++++++++++++++++++++++--------------- 2 files changed, 35 insertions(+), 17 deletions(-) diff --git a/src/meta.json b/src/meta.json index 2ec3532..f7f2879 100644 --- a/src/meta.json +++ b/src/meta.json @@ -1 +1 @@ -[["tf.AggregationMethod", "description: A class listing aggregation methods used to combine gradients.", false, "A class listing aggregation methods used to combine gradients.\n\n Computing partial derivatives can require aggregating gradient\n contributions. This class lists the various methods that can\n be used to combine gradients in the graph.\n\n The following aggregation methods are part of the stable API for\n aggregating gradients:\n\n * `ADD_N`: All of the gradient terms are summed as part of one\n operation using the \"AddN\" op (see `tf.add_n`). This\n method has the property that all gradients must be ready and\n buffered separately in memory before any aggregation is performed.\n * `DEFAULT`: The system-chosen default aggregation method.\n\n The following aggregation methods are experimental and may not\n be supported in future releases:\n\n * `EXPERIMENTAL_TREE`: Gradient terms are summed in pairs using\n the \"AddN\" op. This method of summing gradients may reduce\n performance, but it can improve memory utilization because the\n gradients can be released earlier.\n\n "], ["tf.argsort", "description: Returns the indices of a tensor that give its sorted order along an axis.", false, "Returns the indices of a tensor that give its sorted order along an axis.\n\n >>> values = [1, 10, 26.9, 2.8, 166.32, 62.3]\n >>> sort_order = tf.argsort(values)\n >>> sort_order.numpy()\n array([0, 3, 1, 2, 5, 4], dtype=int32)\n\n For a 1D tensor:\n\n >>> sorted = tf.gather(values, sort_order)\n >>> assert tf.reduce_all(sorted == tf.sort(values))\n\n For higher dimensions, the output has the same shape as\n `values`, but along the given axis, values represent the index of the sorted\n element in that slice of the tensor at the given position.\n\n >>> mat = [[30,20,10],\n ... [20,10,30],\n ... [10,30,20]]\n >>> indices = tf.argsort(mat)\n >>> indices.numpy()\n array([[2, 1, 0],\n [1, 0, 2],\n [0, 2, 1]], dtype=int32)\n\n If `axis=-1` these indices can be used to apply a sort using `tf.gather`:\n\n >>> tf.gather(mat, indices, batch_dims=-1).numpy()\n array([[10, 20, 30],\n [10, 20, 30],\n [10, 20, 30]], dtype=int32)\n\n See also:\n\n * `tf.sort`: Sort along an axis.\n * `tf.math.top_k`: A partial sort that returns a fixed number of top values\n and corresponding indices.\n\n Args:\n values: 1-D or higher **numeric** `Tensor`.\n axis: The axis along which to sort. The default is -1, which sorts the last\n axis.\n direction: The direction in which to sort the values (`'ASCENDING'` or\n `'DESCENDING'`).\n stable: If True, equal elements in the original tensor will not be\n re-ordered in the returned order. Unstable sort is not yet implemented,\n but will eventually be the default for performance reasons. If you require\n a stable order, pass `stable=True` for forwards compatibility.\n name: Optional name for the operation.\n\n Returns:\n An int32 `Tensor` with the same shape as `values`. 
The indices that would\n sort each slice of the given `values` along the given `axis`.\n\n Raises:\n ValueError: If axis is not a constant scalar, or the direction is invalid.\n tf.errors.InvalidArgumentError: If the `values.dtype` is not a `float` or\n `int` type.\n "], ["tf.audio", "description: Public API for tf.audio namespace.", true, "Public API for tf.audio namespace.\n"], ["tf.autodiff", "description: Public API for tf.autodiff namespace.", true, "Public API for tf.autodiff namespace.\n"], ["tf.autograph", "description: Conversion of eager-style Python into TensorFlow graph code.", true, "Conversion of eager-style Python into TensorFlow graph code.\n\nNOTE: In TensorFlow 2.0, AutoGraph is automatically applied when using\n`tf.function`. This module contains lower-level APIs for advanced use.\n\nAutoGraph transforms a subset of Python which operates on TensorFlow objects\ninto equivalent TensorFlow graph code. When executing the graph, it has the same\neffect as if you ran the original code in eager mode.\nPython code which doesn't operate on TensorFlow objects remains functionally\nunchanged, but keep in mind that `tf.function` only executes such code at trace\ntime, and generally will not be consistent with eager execution.\n\nFor more information, see the\n[AutoGraph reference documentation](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/index.md),\nand the [tf.function guide](https://www.tensorflow.org/guide/function#autograph_transformations).\n\n"], ["tf.batch_to_space", "description: BatchToSpace for N-D tensors of type T.", false, "BatchToSpace for N-D tensors of type T.\n\n This operation reshapes the \"batch\" dimension 0 into `M + 1` dimensions of\n shape `block_shape + [batch]`, interleaves these blocks back into the grid\n defined by the spatial dimensions `[1, ..., M]`, to obtain a result with the\n same rank as the input. The spatial dimensions of this intermediate result\n are then optionally cropped according to `crops` to produce the output. This\n is the reverse of SpaceToBatch (see `tf.space_to_batch`).\n\n Args:\n input: A N-D `Tensor` with shape `input_shape = [batch] + spatial_shape +\n remaining_shape`, where `spatial_shape` has M dimensions.\n block_shape: A 1-D `Tensor` with shape [M]. Must be one of the following\n types: `int32`, `int64`. All values must be >= 1. For backwards\n compatibility with TF 1.0, this parameter may be an int, in which case it\n is converted to\n `numpy.array([block_shape, block_shape],\n dtype=numpy.int64)`.\n crops: A 2-D `Tensor` with shape `[M, 2]`. Must be one of the\n following types: `int32`, `int64`. All values must be >= 0.\n `crops[i] = [crop_start, crop_end]` specifies the amount to crop from\n input dimension `i + 1`, which corresponds to spatial dimension `i`.\n It is required that\n `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.\n This operation is equivalent to the following steps:\n 1. Reshape `input` to `reshaped` of shape: [block_shape[0], ...,\n block_shape[M-1], batch / prod(block_shape), input_shape[1], ...,\n input_shape[N-1]]\n 2. Permute dimensions of `reshaped` to produce `permuted` of shape\n [batch / prod(block_shape), input_shape[1], block_shape[0], ...,\n input_shape[M], block_shape[M-1], input_shape[M+1],\n ..., input_shape[N-1]]\n 3. 
Reshape `permuted` to produce `reshaped_permuted` of shape\n [batch / prod(block_shape), input_shape[1] * block_shape[0], ...,\n input_shape[M] * block_shape[M-1], input_shape[M+1], ...,\n input_shape[N-1]]\n 4. Crop the start and end of dimensions `[1, ..., M]` of\n `reshaped_permuted` according to `crops` to produce the output\n of shape:\n [batch / prod(block_shape), input_shape[1] *\n block_shape[0] - crops[0,0] - crops[0,1], ..., input_shape[M] *\n block_shape[M-1] - crops[M-1,0] - crops[M-1,1], input_shape[M+1],\n ..., input_shape[N-1]]\n name: A name for the operation (optional).\n\n Examples:\n\n 1. For the following input of shape `[4, 1, 1, 1]`,\n `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:\n\n ```python\n [[[[1]]],\n [[[2]]],\n [[[3]]],\n [[[4]]]]\n ```\n\n The output tensor has shape `[1, 2, 2, 1]` and value:\n\n ```\n x = [[[[1], [2]],\n [[3], [4]]]]\n ```\n\n 2. For the following input of shape `[4, 1, 1, 3]`,\n `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:\n\n ```python\n [[[1, 2, 3]],\n [[4, 5, 6]],\n [[7, 8, 9]],\n [[10, 11, 12]]]\n ```\n\n The output tensor has shape `[1, 2, 2, 3]` and value:\n\n ```python\n x = [[[[1, 2, 3], [4, 5, 6 ]],\n [[7, 8, 9], [10, 11, 12]]]]\n ```\n\n 3. For the following\n input of shape `[4, 2, 2, 1]`,\n `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:\n\n ```python\n x = [[[[1], [3]], [[ 9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n ```\n\n The output tensor has shape `[1, 4, 4, 1]` and value:\n\n ```python\n x = [[[1], [2], [ 3], [ 4]],\n [[5], [6], [ 7], [ 8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]\n ```\n\n 4. For the following input of shape\n `[8, 1, 3, 1]`,\n `block_shape = [2, 2]`, and `crops = [[0, 0], [2, 0]]`:\n\n ```python\n x = [[[[0], [ 1], [ 3]]],\n [[[0], [ 9], [11]]],\n [[[0], [ 2], [ 4]]],\n [[[0], [10], [12]]],\n [[[0], [ 5], [ 7]]],\n [[[0], [13], [15]]],\n [[[0], [ 6], [ 8]]],\n [[[0], [14], [16]]]]\n ```\n\n The output tensor has shape `[2, 2, 4, 1]` and value:\n\n ```python\n x = [[[[ 1], [ 2], [ 3], [ 4]],\n [[ 5], [ 6], [ 7], [ 8]]],\n [[[ 9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n ```\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n "], ["tf.bitcast", "description: Bitcasts a tensor from one type to another without copying data.", false, "Bitcasts a tensor from one type to another without copying data.\n\n Given a tensor `input`, this operation returns a tensor that has the same buffer\n data as `input` with datatype `type`.\n\n If the input datatype `T` is larger than the output datatype `type` then the\n shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].\n\n If `T` is smaller than `type`, the operator requires that the rightmost\n dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from\n [..., sizeof(`type`)/sizeof(`T`)] to [...].\n\n tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype\n (e.g. 
tf.complex64 or tf.complex128) as tf.cast() makes the imaginary part 0 while tf.bitcast()\n raises an error.\n For example,\n\n Example 1:\n\n >>> a = [1., 2., 3.]\n >>> equality_bitcast = tf.bitcast(a, tf.complex128)\n Traceback (most recent call last):\n ...\n InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast]\n >>> equality_cast = tf.cast(a, tf.complex128)\n >>> print(equality_cast)\n tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128)\n\n Example 2:\n\n >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8)\n <tf.Tensor: shape=(4,), dtype=uint8, numpy=array([255, 255, 255, 255], dtype=uint8)>\n\n Example 3:\n\n >>> x = [1., 2., 3.]\n >>> y = [0., 2., 3.]\n >>> equality = tf.equal(x, y)\n >>> equality_cast = tf.cast(equality, tf.float32)\n >>> equality_bitcast = tf.bitcast(equality_cast, tf.uint8)\n >>> print(equality)\n tf.Tensor([False True True], shape=(3,), dtype=bool)\n >>> print(equality_cast)\n tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32)\n >>> print(equality_bitcast)\n tf.Tensor(\n [[ 0 0 0 0]\n [ 0 0 128 63]\n [ 0 0 128 63]], shape=(3, 4), dtype=uint8)\n\n *NOTE*: Bitcast is implemented as a low-level cast, so machines with different\n endian orderings will give different results.\n\n Args:\n input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `complex64`, `complex128`, `qint8`, `quint8`, `qint16`, `quint16`, `qint32`.\n type: A `tf.DType` from: `tf.bfloat16, tf.half, tf.float32, tf.float64, tf.int64, tf.int32, tf.uint8, tf.uint16, tf.uint32, tf.uint64, tf.int8, tf.int16, tf.complex64, tf.complex128, tf.qint8, tf.quint8, tf.qint16, tf.quint16, tf.qint32`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `type`.\n "], ["tf.bitwise", "description: Operations for manipulating the binary representations of integers.", true, "Operations for manipulating the binary representations of integers.\n"],
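Since the `tf.bitwise` entry above carries no example of its own, here is a minimal sketch of what these ops do; it assumes only the documented `tf.bitwise.bitwise_and`, `tf.bitwise.bitwise_xor`, and `tf.bitwise.left_shift` APIs:

```python
import tensorflow as tf

a = tf.constant([0b1010, 0b1100], dtype=tf.int32)  # [10, 12]
b = tf.constant([0b0110, 0b1010], dtype=tf.int32)  # [6, 10]

# Element-wise operations on the underlying bit patterns.
print(tf.bitwise.bitwise_and(a, b).numpy())               # [2 8]
print(tf.bitwise.bitwise_xor(a, b).numpy())               # [12 6]
print(tf.bitwise.left_shift(a, tf.ones_like(a)).numpy())  # [20 24]
```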
["tf.boolean_mask", "description: Apply boolean mask to tensor.", false, "Apply boolean mask to tensor.\n\n Numpy equivalent is `tensor[mask]`.\n\n In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match\n the first K dimensions of `tensor`'s shape. We then have:\n `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`\n where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).\n The `axis` could be used with `mask` to indicate the axis to mask from.\n In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match\n the first `axis + dim(mask)` dimensions of `tensor`'s shape.\n\n See also: `tf.ragged.boolean_mask`, which can be applied to both dense and\n ragged tensors, and can be used if you need to preserve the masked dimensions\n of `tensor` (rather than flattening them, as `tf.boolean_mask` does).\n\n Examples:\n\n >>> tensor = [0, 1, 2, 3] # 1-D example\n >>> mask = np.array([True, False, True, False])\n >>> tf.boolean_mask(tensor, mask)\n <tf.Tensor: shape=(2,), dtype=int32, numpy=array([0, 2], dtype=int32)>\n\n >>> tensor = [[1, 2], [3, 4], [5, 6]] # 2-D example\n >>> mask = np.array([True, False, True])\n >>> tf.boolean_mask(tensor, mask)\n <tf.Tensor: shape=(2, 2), dtype=int32, numpy=\n array([[1, 2],\n [5, 6]], dtype=int32)>\n\n Args:\n tensor: N-D Tensor.\n mask: K-D boolean Tensor, K <= N and K must be known statically.\n axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By\n default, axis is 0 which will mask from the first dimension. Otherwise K +\n axis <= N.\n name: A name for this operation (optional).\n\n Returns:\n (N-K+1)-dimensional tensor populated by entries in `tensor` corresponding\n to `True` values in `mask`.\n\n Raises:\n ValueError: If shapes do not conform.\n\n Examples:\n\n ```python\n # 2-D example\n tensor = [[1, 2], [3, 4], [5, 6]]\n mask = np.array([True, False, True])\n boolean_mask(tensor, mask) # [[1, 2], [5, 6]]\n ```\n "], ["tf.broadcast_dynamic_shape", "description: Computes the shape of a broadcast given symbolic shapes.", false, "Computes the shape of a broadcast given symbolic shapes.\n\n When `shape_x` and `shape_y` are Tensors representing shapes (i.e. the result\n of calling tf.shape on another Tensor) this computes a Tensor which is the\n shape of the result of a broadcasting op applied to tensors of shapes\n `shape_x` and `shape_y`.\n\n This is useful when validating the result of a broadcasting operation when the\n tensors do not have statically known shapes.\n\n Example:\n\n >>> shape_x = (1, 2, 3)\n >>> shape_y = (5, 1, 3)\n >>> tf.broadcast_dynamic_shape(shape_x, shape_y)\n <tf.Tensor: shape=(3,), dtype=int32, numpy=array([5, 2, 3], dtype=int32)>\n\n Args:\n shape_x: A rank 1 integer `Tensor`, representing the shape of x.\n shape_y: A rank 1 integer `Tensor`, representing the shape of y.\n\n Returns:\n A rank 1 integer `Tensor` representing the broadcasted shape.\n\n Raises:\n InvalidArgumentError: If the two shapes are incompatible for\n broadcasting.\n "], ["tf.broadcast_static_shape", "description: Computes the shape of a broadcast given known shapes.", false, "Computes the shape of a broadcast given known shapes.\n\n When `shape_x` and `shape_y` are fully known `TensorShape`s this computes a\n `TensorShape` which is the shape of the result of a broadcasting op applied to\n tensors of shapes `shape_x` and `shape_y`.\n\n For example, if shape_x is `TensorShape([1, 2, 3])` and shape_y is\n `TensorShape([5, 1, 3])`, the result is a TensorShape whose value is\n `TensorShape([5, 2, 3])`.\n\n This is useful when validating the result of a broadcasting operation when the\n tensors have statically known shapes.\n\n Example:\n\n >>> shape_x = tf.TensorShape([1, 2, 3])\n >>> shape_y = tf.TensorShape([5, 1, 3])\n >>> tf.broadcast_static_shape(shape_x, shape_y)\n TensorShape([5, 2, 3])\n\n Args:\n shape_x: A `TensorShape`\n shape_y: A `TensorShape`\n\n Returns:\n A `TensorShape` representing the broadcasted shape.\n\n Raises:\n ValueError: If the two shapes can not be broadcasted.\n "], ["tf.broadcast_to", "description: Broadcast an array for a compatible shape.", false, "Broadcast an array for a compatible shape.\n\n Broadcasting is the process of making arrays have compatible shapes\n for arithmetic operations. Two shapes are compatible if for each\n dimension pair they are either equal or one of them is one. When trying\n to broadcast a Tensor to a shape, it starts with the trailing dimensions,\n and works its way forward.\n\n For example,\n\n >>> x = tf.constant([1, 2, 3])\n >>> y = tf.broadcast_to(x, [3, 3])\n >>> print(y)\n tf.Tensor(\n [[1 2 3]\n [1 2 3]\n [1 2 3]], shape=(3, 3), dtype=int32)\n\n In the above example, the input Tensor with shape `[3]`\n is broadcast to an output Tensor with shape `[3, 3]`.\n\n When doing broadcasted operations such as multiplying a tensor\n by a scalar, broadcasting (usually) confers some time or space\n benefit, as the broadcasted tensor is never materialized.\n\n However, `broadcast_to` does not carry with it any such benefits.\n The newly-created tensor takes the full memory of the broadcasted\n shape.
(In a graph context, `broadcast_to` might be fused to\n subsequent operation and then be optimized away, however.)\n\n Args:\n input: A `Tensor`. A Tensor to broadcast.\n shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n An 1-D `int` Tensor. The shape of the desired output.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n "], ["tf.case", "description: Create a case operation.", false, "Create a case operation.\n\n See also `tf.switch_case`.\n\n The `pred_fn_pairs` parameter is a list of pairs of size N.\n Each pair contains a boolean scalar tensor and a python callable that\n creates the tensors to be returned if the boolean evaluates to True.\n `default` is a callable generating a list of tensors. All the callables\n in `pred_fn_pairs` as well as `default` (if provided) should return the same\n number and types of tensors.\n\n If `exclusive==True`, all predicates are evaluated, and an exception is\n thrown if more than one of the predicates evaluates to `True`.\n If `exclusive==False`, execution stops at the first predicate which\n evaluates to True, and the tensors generated by the corresponding function\n are returned immediately. If none of the predicates evaluate to True, this\n operation returns the tensors generated by `default`.\n\n `tf.case` supports nested structures as implemented in\n `tf.nest`. All of the callables must return the same (possibly nested) value\n structure of lists, tuples, and/or named tuples. Singleton lists and tuples\n form the only exceptions to this: when returned by a callable, they are\n implicitly unpacked to single values. This behavior is disabled by passing\n `strict=True`.\n\n @compatibility(v2)\n `pred_fn_pairs` could be a dictionary in v1. However, tf.Tensor and\n tf.Variable are no longer hashable in v2, so cannot be used as a key for a\n dictionary. 
Please use a list or a tuple instead.\n @end_compatibility\n\n\n **Example 1:**\n\n Pseudocode:\n\n ```\n if (x < y) return 17;\n else return 23;\n ```\n\n Expressions:\n\n ```python\n f1 = lambda: tf.constant(17)\n f2 = lambda: tf.constant(23)\n r = tf.case([(tf.less(x, y), f1)], default=f2)\n ```\n\n **Example 2:**\n\n Pseudocode:\n\n ```\n if (x < y && x > z) raise OpError(\"Only one predicate may evaluate to True\");\n if (x < y) return 17;\n else if (x > z) return 23;\n else return -1;\n ```\n\n Expressions:\n\n ```python\n def f1(): return tf.constant(17)\n def f2(): return tf.constant(23)\n def f3(): return tf.constant(-1)\n r = tf.case([(tf.less(x, y), f1), (tf.greater(x, z), f2)],\n default=f3, exclusive=True)\n ```\n\n Args:\n pred_fn_pairs: List of pairs of a boolean scalar tensor and a callable which\n returns a list of tensors.\n default: Optional callable that returns a list of tensors.\n exclusive: True iff at most one predicate is allowed to evaluate to `True`.\n strict: A boolean that enables/disables 'strict' mode; see above.\n name: A name for this operation (optional).\n\n Returns:\n The tensors returned by the first pair whose predicate evaluated to True, or\n those returned by `default` if none does.\n\n Raises:\n TypeError: If `pred_fn_pairs` is not a list/tuple.\n TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.\n TypeError: If `fns[i]` is not callable for any i, or `default` is not\n callable.\n "], ["tf.cast", "description: Casts a tensor to a new type.", false, "Casts a tensor to a new type.\n\n The operation casts `x` (in case of `Tensor`) or `x.values`\n (in case of `SparseTensor` or `IndexedSlices`) to `dtype`.\n\n For example:\n\n >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)\n >>> tf.cast(x, tf.int32)\n <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>\n\n Notice `tf.cast` has an alias `tf.dtypes.cast`:\n\n >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)\n >>> tf.dtypes.cast(x, tf.int32)\n <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>\n\n The operation supports data types (for `x` and `dtype`) of\n `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,\n `float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`.\n In case of casting from complex types (`complex64`, `complex128`) to real\n types, only the real part of `x` is returned. In case of casting from real\n types to complex types (`complex64`, `complex128`), the imaginary part of the\n returned value is set to `0`. The handling of complex types here matches the\n behavior of numpy.\n\n Note: casting nan and inf values to integral types has undefined behavior.\n\n Args:\n x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It could\n be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`,\n `int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`,\n `bfloat16`.\n dtype: The destination type. The list of supported dtypes is the same as\n `x`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and\n same type as `dtype`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `dtype`.\n "],
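The complex-to-real behavior described in the `tf.cast` entry above has no accompanying example; a small sketch (the input values are arbitrary):

```python
import tensorflow as tf

z = tf.constant([1 + 2j, 3 - 4j], dtype=tf.complex64)
# Casting complex -> real keeps only the real part, matching numpy.
print(tf.cast(z, tf.float32).numpy())           # [1. 3.]
# Casting real -> complex sets the imaginary part to 0.
print(tf.cast([1., 2.], tf.complex64).numpy())  # [1.+0.j 2.+0.j]
```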
["tf.clip_by_global_norm", "description: Clips values of multiple tensors by the ratio of the sum of their norms.", false, "Clips values of multiple tensors by the ratio of the sum of their norms.\n\n Given a tuple or list of tensors `t_list`, and a clipping ratio `clip_norm`,\n this operation returns a list of clipped tensors `list_clipped`\n and the global norm (`global_norm`) of all tensors in `t_list`. Optionally,\n if you've already computed the global norm for `t_list`, you can specify\n the global norm with `use_norm`.\n\n To perform the clipping, the values `t_list[i]` are set to:\n\n t_list[i] * clip_norm / max(global_norm, clip_norm)\n\n where:\n\n global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))\n\n If `clip_norm > global_norm` then the entries in `t_list` remain as they are,\n otherwise they're all shrunk by the global ratio.\n\n If `global_norm == infinity` then the entries in `t_list` are all set to `NaN`\n to signal that an error occurred.\n\n Any of the entries of `t_list` that are of type `None` are ignored.\n\n This is the correct way to perform gradient clipping (Pascanu et al., 2012).\n\n However, it is slower than `clip_by_norm()` because all the parameters must be\n ready before the clipping operation can be performed.\n\n Args:\n t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.\n clip_norm: A 0-D (scalar) `Tensor` > 0. The clipping ratio.\n use_norm: A 0-D (scalar) `Tensor` of type `float` (optional). The global\n norm to use. If not provided, `global_norm()` is used to compute the norm.\n name: A name for the operation (optional).\n\n Returns:\n list_clipped: A list of `Tensors` of the same type as `t_list`.\n global_norm: A 0-D (scalar) `Tensor` representing the global norm.\n\n Raises:\n TypeError: If `t_list` is not a sequence.\n\n References:\n On the difficulty of training Recurrent Neural Networks:\n [Pascanu et al., 2012](http://proceedings.mlr.press/v28/pascanu13.html)\n ([pdf](http://proceedings.mlr.press/v28/pascanu13.pdf))\n "],
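The `tf.clip_by_global_norm` entry above states the clipping formula but ships no example; a minimal, self-contained sketch with values chosen so the arithmetic is easy to verify:

```python
import tensorflow as tf

x = tf.Variable(3.0)
y = tf.Variable(4.0)
with tf.GradientTape() as tape:
    loss = x ** 2 + y ** 2            # gradients are [2x, 2y] = [6.0, 8.0]
grads = tape.gradient(loss, [x, y])

# global_norm = sqrt(6**2 + 8**2) = 10.0, so with clip_norm=5.0 every
# gradient is scaled by clip_norm / global_norm = 0.5.
clipped, global_norm = tf.clip_by_global_norm(grads, clip_norm=5.0)
print(global_norm.numpy())            # 10.0
print([g.numpy() for g in clipped])   # [3.0, 4.0]
```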
["tf.clip_by_norm", "description: Clips tensor values to a maximum L2-norm.", false, "Clips tensor values to a maximum L2-norm.\n\n Given a tensor `t`, and a maximum clip value `clip_norm`, this operation\n normalizes `t` so that its L2-norm is less than or equal to `clip_norm`,\n along the dimensions given in `axes`. Specifically, in the default case\n where all dimensions are used for calculation, if the L2-norm of `t` is\n already less than or equal to `clip_norm`, then `t` is not modified. If\n the L2-norm is greater than `clip_norm`, then this operation returns a\n tensor of the same type and shape as `t` with its values set to:\n\n `t * clip_norm / l2norm(t)`\n\n In this case, the L2-norm of the output tensor is `clip_norm`.\n\n As another example, if `t` is a matrix and `axes == [1]`, then each row\n of the output will have L2-norm less than or equal to `clip_norm`. If\n `axes == [0]` instead, each column of the output will be clipped.\n\n Code example:\n\n >>> some_nums = tf.constant([[1, 2, 3, 4, 5]], dtype=tf.float32)\n >>> tf.clip_by_norm(some_nums, 2.0).numpy()\n array([[0.26967996, 0.5393599 , 0.80903983, 1.0787199 , 1.3483998 ]],\n dtype=float32)\n\n This operation is typically used to clip gradients before applying them with\n an optimizer. Most gradient data is a collection of different shaped tensors\n for different parts of the model. Thus, this is a common usage:\n\n ```\n # Get your gradients after training\n loss_value, grads = grad(model, features, labels)\n\n # Apply some clipping\n grads = [tf.clip_by_norm(g, norm)\n for g in grads]\n\n # Continue on with training\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n ```\n\n Args:\n t: A `Tensor` or `IndexedSlices`. This must be a floating point type.\n clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value, also\n floating point.\n axes: A 1-D (vector) `Tensor` of type int32 containing the dimensions\n to use for computing the L2-norm. If `None` (the default), uses all\n dimensions.\n name: A name for the operation (optional).\n\n Returns:\n A clipped `Tensor` or `IndexedSlices`.\n\n Raises:\n ValueError: If the clip_norm tensor is not a 0-D scalar tensor.\n TypeError: If dtype of the input is not a floating point or\n complex type.\n "], ["tf.clip_by_value", "description: Clips tensor values to a specified min and max.", false, "Clips tensor values to a specified min and max.\n\n Given a tensor `t`, this operation returns a tensor of the same type and\n shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.\n Any values less than `clip_value_min` are set to `clip_value_min`. Any values\n greater than `clip_value_max` are set to `clip_value_max`.\n\n Note: `clip_value_min` needs to be smaller than or equal to `clip_value_max` for\n correct results.\n\n For example:\n\n Basic usage passes a scalar as the min and max value.\n\n >>> t = tf.constant([[-10., -1., 0.], [0., 2., 10.]])\n >>> t2 = tf.clip_by_value(t, clip_value_min=-1, clip_value_max=1)\n >>> t2.numpy()\n array([[-1., -1., 0.],\n [ 0., 1., 1.]], dtype=float32)\n\n The min and max can be the same size as `t`, or broadcastable to that size.\n\n >>> t = tf.constant([[-1, 0., 10.], [-1, 0, 10]])\n >>> clip_min = [[2],[1]]\n >>> t3 = tf.clip_by_value(t, clip_value_min=clip_min, clip_value_max=100)\n >>> t3.numpy()\n array([[ 2., 2., 10.],\n [ 1., 1., 10.]], dtype=float32)\n\n Broadcasting fails, intentionally, if you would expand the dimensions of `t`:\n\n >>> t = tf.constant([[-1, 0., 10.], [-1, 0, 10]])\n >>> clip_min = [[[2, 1]]] # Has a third axis\n >>> t4 = tf.clip_by_value(t, clip_value_min=clip_min, clip_value_max=100)\n Traceback (most recent call last):\n ...\n InvalidArgumentError: Incompatible shapes: [2,3] vs. [1,1,2]\n\n It throws a `TypeError` if you try to clip an `int` to a `float` value\n (`tf.cast` the input to `float` first).\n\n >>> t = tf.constant([[1, 2], [3, 4]], dtype=tf.int32)\n >>> t5 = tf.clip_by_value(t, clip_value_min=-3.1, clip_value_max=3.1)\n Traceback (most recent call last):\n ...\n TypeError: Cannot convert ...\n\n\n Args:\n t: A `Tensor` or `IndexedSlices`.\n clip_value_min: The minimum value to clip to. A scalar `Tensor` or one that\n is broadcastable to the shape of `t`.\n clip_value_max: The maximum value to clip to. A scalar `Tensor` or one that\n is broadcastable to the shape of `t`.\n name: A name for the operation (optional).\n\n Returns:\n A clipped `Tensor` or `IndexedSlices`.\n\n Raises:\n `tf.errors.InvalidArgumentError`: If the clip tensors would trigger array\n broadcasting that would make the returned tensor larger than the input.\n TypeError: If dtype of the input is `int32` and dtype of\n the `clip_value_min` or `clip_value_max` is `float32`.\n "], ["tf.compat", "description: Compatibility functions.", true, "Compatibility functions.\n\nThe `tf.compat` module contains two sets of compatibility functions.\n\n## Tensorflow 1.x and 2.x APIs\n\nThe `compat.v1` and `compat.v2` submodules provide a complete copy of both the\n`v1` and `v2` APIs for backwards and forwards compatibility across TensorFlow\nversions 1.x and 2.x.
See the\n[migration guide](https://www.tensorflow.org/guide/migrate) for details.\n\n## Utilities for writing compatible code\n\nAside from the `compat.v1` and `compat.v2` submodules, `tf.compat` also contains\na set of helper functions for writing code that works in both:\n\n* TensorFlow 1.x and 2.x\n* Python 2 and 3\n\n\n## Type collections\n\nThe compatibility module also provides the following aliases for common\nsets of python types:\n\n* `bytes_or_text_types`\n* `complex_types`\n* `integral_types`\n* `real_types`\n\n"], ["tf.concat", "description: Concatenates tensors along one dimension.", false, "Concatenates tensors along one dimension.\n\n See also `tf.tile`, `tf.stack`, `tf.repeat`.\n\n Concatenates the list of tensors `values` along dimension `axis`. If\n `values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated\n result has shape\n\n [D0, D1, ... Raxis, ...Dn]\n\n where\n\n Raxis = sum(Daxis(i))\n\n That is, the data from the input tensors is joined along the `axis`\n dimension.\n\n The number of dimensions of the input tensors must match, and all dimensions\n except `axis` must be equal.\n\n For example:\n\n >>> t1 = [[1, 2, 3], [4, 5, 6]]\n >>> t2 = [[7, 8, 9], [10, 11, 12]]\n >>> tf.concat([t1, t2], 0)\n <tf.Tensor: shape=(4, 3), dtype=int32, numpy=\n array([[ 1, 2, 3],\n [ 4, 5, 6],\n [ 7, 8, 9],\n [10, 11, 12]], dtype=int32)>\n\n >>> tf.concat([t1, t2], 1)\n <tf.Tensor: shape=(2, 6), dtype=int32, numpy=\n array([[ 1, 2, 3, 7, 8, 9],\n [ 4, 5, 6, 10, 11, 12]], dtype=int32)>\n\n As in Python, the `axis` could also be negative. Negative `axis` values\n are interpreted as counting from the end of the rank, i.e., as the\n `axis + rank(values)`-th dimension.\n\n For example:\n\n >>> t1 = [[[1, 2], [2, 3]], [[4, 4], [5, 3]]]\n >>> t2 = [[[7, 4], [8, 4]], [[2, 10], [15, 11]]]\n >>> tf.concat([t1, t2], -1)\n <tf.Tensor: shape=(2, 2, 4), dtype=int32, numpy=\n array([[[ 1, 2, 7, 4],\n [ 2, 3, 8, 4]],\n [[ 4, 4, 2, 10],\n [ 5, 3, 15, 11]]], dtype=int32)>\n\n Note: If you are concatenating along a new axis consider using stack.\n E.g.\n\n ```python\n tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)\n ```\n\n can be rewritten as\n\n ```python\n tf.stack(tensors, axis=axis)\n ```\n\n Args:\n values: A list of `Tensor` objects or a single `Tensor`.\n axis: 0-D `int32` `Tensor`. Dimension along which to concatenate. Must be\n in the range `[-rank(values), rank(values))`. As in Python, indexing for\n axis is 0-based. Positive axis in the range of `[0, rank(values))` refers\n to `axis`-th dimension. And negative axis refers to `axis +\n rank(values)`-th dimension.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` resulting from concatenation of the input tensors.\n "],
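As a runnable companion to the concat-vs-stack note in the `tf.concat` entry above (the tensor values are arbitrary):

```python
import tensorflow as tf

t1 = tf.constant([1, 2, 3])
t2 = tf.constant([4, 5, 6])

# Concatenating along a *new* axis via expand_dims...
a = tf.concat([tf.expand_dims(t, 0) for t in (t1, t2)], axis=0)
# ...is equivalent to stacking directly.
b = tf.stack([t1, t2], axis=0)

print(a.numpy())                    # [[1 2 3] [4 5 6]]
print(bool(tf.reduce_all(a == b)))  # True
```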
["tf.cond", "description: Return true_fn() if the predicate pred is true else false_fn().", false, "Return `true_fn()` if the predicate `pred` is true else `false_fn()`.\n\n `true_fn` and `false_fn` both return lists of output tensors. `true_fn` and\n `false_fn` must have the same non-zero number and type of outputs.\n\n **WARNING**: Any Tensors or Operations created outside of `true_fn` and\n `false_fn` will be executed regardless of which branch is selected at runtime.\n\n Although this behavior is consistent with the dataflow model of TensorFlow,\n it has frequently surprised users who expected lazier semantics.\n Consider the following simple program:\n\n ```python\n z = tf.multiply(a, b)\n result = tf.cond(x < y, lambda: tf.add(x, z), lambda: tf.square(y))\n ```\n\n If `x < y`, the `tf.add` operation will be executed and the `tf.square`\n operation will not be executed. Since `z` is needed for at least one\n branch of the `cond`, the `tf.multiply` operation is always executed,\n unconditionally.\n\n Note that `cond` calls `true_fn` and `false_fn` *exactly once* (inside the\n call to `cond`, and not at all during `Session.run()`). `cond`\n stitches together the graph fragments created during the `true_fn` and\n `false_fn` calls with some additional graph nodes to ensure that the right\n branch gets executed depending on the value of `pred`.\n\n `tf.cond` supports nested structures as implemented in\n `tensorflow.python.util.nest`. Both `true_fn` and `false_fn` must return the\n same (possibly nested) value structure of lists, tuples, and/or named tuples.\n Singleton lists and tuples form the only exceptions to this: when returned by\n `true_fn` and/or `false_fn`, they are implicitly unpacked to single values.\n\n Note: It is illegal to \"directly\" use tensors created inside a cond branch\n outside it, e.g. by storing a reference to a branch tensor in the python\n state. If you need to use a tensor created in a branch function you should\n return it as an output of the branch function and use the output from\n `tf.cond` instead.\n\n Args:\n pred: A scalar determining whether to return the result of `true_fn` or\n `false_fn`.\n true_fn: The callable to be performed if pred is true.\n false_fn: The callable to be performed if pred is false.\n name: Optional name prefix for the returned tensors.\n\n Returns:\n Tensors returned by the call to either `true_fn` or `false_fn`. If the\n callables return a singleton list, the element is extracted from the list.\n\n Raises:\n TypeError: if `true_fn` or `false_fn` is not callable.\n ValueError: if `true_fn` and `false_fn` do not return the same number of\n tensors, or return tensors of different types.\n\n Example:\n\n ```python\n x = tf.constant(2)\n y = tf.constant(5)\n def f1(): return tf.multiply(x, 17)\n def f2(): return tf.add(y, 23)\n r = tf.cond(tf.less(x, y), f1, f2)\n # r is set to f1().\n # Operations in f2 (e.g., tf.add) are not executed.\n ```\n\n "],
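The warning in the `tf.cond` entry above (ops created outside the branch callables always run) can be checked directly in eager mode; a small sketch mirroring the docstring's pseudocode:

```python
import tensorflow as tf

x, y = tf.constant(2), tf.constant(5)

z = tf.multiply(x, y)  # computed unconditionally: either branch may need it
r = tf.cond(x < y,
            lambda: tf.add(x, z),   # taken branch: 2 + 10 = 12
            lambda: tf.square(y))   # not executed since x < y
print(r.numpy())  # 12
```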
["tf.config", "description: Public API for tf.config namespace.", true, "Public API for tf.config namespace.\n"], ["tf.constant", "description: Creates a constant tensor from a tensor-like object.", false, "Creates a constant tensor from a tensor-like object.\n\n Note: All eager `tf.Tensor` values are immutable (in contrast to\n `tf.Variable`). There is nothing especially _constant_ about the value\n returned from `tf.constant`. This function is not fundamentally different from\n `tf.convert_to_tensor`. The name `tf.constant` comes from the `value` being\n embedded in a `Const` node in the `tf.Graph`. `tf.constant` is useful\n for asserting that the value can be embedded that way.\n\n If the argument `dtype` is not specified, then the type is inferred from\n the type of `value`.\n\n >>> # Constant 1-D Tensor from a python list.\n >>> tf.constant([1, 2, 3, 4, 5, 6])\n <tf.Tensor: shape=(6,), dtype=int32, numpy=array([1, 2, 3, 4, 5, 6], dtype=int32)>\n >>> # Or a numpy array\n >>> a = np.array([[1, 2, 3], [4, 5, 6]])\n >>> tf.constant(a)\n <tf.Tensor: shape=(2, 3), dtype=int64, numpy=\n array([[1, 2, 3],\n [4, 5, 6]])>\n\n If `dtype` is specified, the resulting tensor values are cast to the requested\n `dtype`.\n\n >>> tf.constant([1, 2, 3, 4, 5, 6], dtype=tf.float64)\n <tf.Tensor: shape=(6,), dtype=float64, numpy=array([1., 2., 3., 4., 5., 6.])>\n\n If `shape` is set, the `value` is reshaped to match. Scalars are expanded to\n fill the `shape`:\n\n >>> tf.constant(0, shape=(2, 3))\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[0, 0, 0],\n [0, 0, 0]], dtype=int32)>\n >>> tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[1, 2, 3],\n [4, 5, 6]], dtype=int32)>\n\n `tf.constant` has no effect if an eager Tensor is passed as the `value`; it\n even transmits gradients:\n\n >>> v = tf.Variable([0.0])\n >>> with tf.GradientTape() as g:\n ... loss = tf.constant(v + v)\n >>> g.gradient(loss, v).numpy()\n array([2.], dtype=float32)\n\n But, since `tf.constant` embeds the value in the `tf.Graph` this fails for\n symbolic tensors:\n\n >>> with tf.compat.v1.Graph().as_default():\n ... i = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.float32)\n ... t = tf.constant(i)\n Traceback (most recent call last):\n ...\n TypeError: ...\n\n `tf.constant` will create tensors on the current device. Inputs which are\n already tensors maintain their placements unchanged.\n\n Related Ops:\n\n * `tf.convert_to_tensor` is similar but:\n * It has no `shape` argument.\n * Symbolic tensors are allowed to pass through.\n\n >>> with tf.compat.v1.Graph().as_default():\n ... i = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.float32)\n ... t = tf.convert_to_tensor(i)\n\n * `tf.fill`: differs in a few ways:\n * `tf.constant` supports arbitrary constants, not just uniform scalar\n Tensors like `tf.fill`.\n * `tf.fill` creates an Op in the graph that is expanded at runtime, so it\n can efficiently represent large tensors.\n * Since `tf.fill` does not embed the value, it can produce dynamically\n sized outputs.\n\n Args:\n value: A constant value (or list) of output type `dtype`.\n dtype: The type of the elements of the resulting tensor.\n shape: Optional dimensions of resulting tensor.\n name: Optional name for the tensor.\n\n Returns:\n A Constant Tensor.\n\n Raises:\n TypeError: if shape is incorrectly specified or unsupported.\n ValueError: if called on a symbolic tensor.\n "], ["tf.constant_initializer", "description: Initializer that generates tensors with constant values.", false, "Initializer that generates tensors with constant values.\n\n Initializers allow you to pre-specify an initialization strategy, encoded in\n the Initializer object, without knowing the shape and dtype of the variable\n being initialized.\n\n `tf.constant_initializer` returns an object which when called returns a tensor\n populated with the `value` specified in the constructor. This `value` must be\n convertible to the requested `dtype`.\n\n The argument `value` can be a scalar constant value, or a list of\n values. Scalars broadcast to whichever shape is requested from the\n initializer.\n\n If `value` is a list, then the length of the list must be equal to the number\n of elements implied by the desired shape of the tensor. If the total number of\n elements in `value` is not equal to the number of elements required by the\n tensor shape, the initializer will raise a `TypeError`.\n\n Examples:\n\n >>> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.constant_initializer(2.))\n >>> v1\n <tf.Variable ... shape=(3,) ... numpy=array([2., 2., 2.], dtype=float32)>\n >>> v2\n <tf.Variable ... shape=(3, 3) ... numpy=\n array([[2., 2., 2.],\n [2., 2., 2.],\n [2., 2., 2.]], dtype=float32)>\n >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))\n (<tf.Variable ... shape=(4,) dtype=float32 ...>, <tf.Variable ... shape=(4, 4) dtype=float32 ...>)\n\n >>> value = [0, 1, 2, 3, 4, 5, 6, 7]\n >>> init = tf.constant_initializer(value)\n >>> # Fitting shape\n >>> tf.Variable(init(shape=[2, 4], dtype=tf.float32))\n <tf.Variable ... shape=(2, 4) ... numpy=\n array([[0., 1., 2., 3.],\n [4., 5., 6., 7.]], dtype=float32)>\n >>> # Larger shape\n >>> tf.Variable(init(shape=[3, 4], dtype=tf.float32))\n Traceback (most recent call last):\n ...\n TypeError: ...value has 8 elements, shape is (3, 4) with 12 elements...\n >>> # Smaller shape\n >>> tf.Variable(init(shape=[2, 3], dtype=tf.float32))\n Traceback (most recent call last):\n ...\n TypeError: ...value has 8 elements, shape is (2, 3) with 6 elements...\n\n Args:\n value: A Python scalar, list or tuple of values, or an N-dimensional numpy\n array.
All elements of the initialized variable will be set to the\n corresponding value in the `value` argument.\n\n Raises:\n TypeError: If the input `value` is not one of the expected types.\n "], ["tf.control_dependencies", "description: Wrapper for Graph.control_dependencies() using the default graph.", false, "Wrapper for `Graph.control_dependencies()` using the default graph.\n\n See `tf.Graph.control_dependencies` for more details.\n\n Note: *In TensorFlow 2 with eager and/or Autograph, you should not require\n this method, as ops execute in the expected order thanks to automatic control\n dependencies.* Only use `tf.control_dependencies` when working with v1\n `tf.Graph` code.\n\n When eager execution is enabled, any callable object in the `control_inputs`\n list will be called.\n\n Args:\n control_inputs: A list of `Operation` or `Tensor` objects which must be\n executed or computed before running the operations defined in the context.\n Can also be `None` to clear the control dependencies. If eager execution\n is enabled, any callable object in the `control_inputs` list will be\n called.\n\n Returns:\n A context manager that specifies control dependencies for all\n operations constructed within the context.\n "], ["tf.convert_to_tensor", "description: Converts the given value to a Tensor.", false, "Converts the given `value` to a `Tensor`.\n\n This function converts Python objects of various types to `Tensor`\n objects. It accepts `Tensor` objects, numpy arrays, Python lists,\n and Python scalars.\n\n For example:\n\n >>> import numpy as np\n >>> def my_func(arg):\n ... arg = tf.convert_to_tensor(arg, dtype=tf.float32)\n ... return arg\n\n >>> # The following calls are equivalent.\n ...\n >>> value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))\n >>> print(value_1)\n tf.Tensor(\n [[1. 2.]\n [3. 4.]], shape=(2, 2), dtype=float32)\n >>> value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])\n >>> print(value_2)\n tf.Tensor(\n [[1. 2.]\n [3. 4.]], shape=(2, 2), dtype=float32)\n >>> value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))\n >>> print(value_3)\n tf.Tensor(\n [[1. 2.]\n [3. 4.]], shape=(2, 2), dtype=float32)\n\n This function can be useful when composing a new operation in Python\n (such as `my_func` in the example above). All standard Python op\n constructors apply this function to each of their Tensor-valued\n inputs, which allows those ops to accept numpy arrays, Python lists,\n and scalars in addition to `Tensor` objects.\n\n Note: This function diverges from default Numpy behavior for `float` and\n `string` types when `None` is present in a Python list or scalar. Rather\n than silently converting `None` values, an error will be thrown.\n\n Args:\n value: An object whose type has a registered `Tensor` conversion function.\n dtype: Optional element type for the returned tensor. If missing, the type\n is inferred from the type of `value`.\n dtype_hint: Optional element type for the returned tensor, used when dtype\n is None. 
In some cases, a caller may not have a dtype in mind when\n converting to a tensor, so dtype_hint can be used as a soft preference.\n If the conversion to `dtype_hint` is not possible, this argument has no\n effect.\n name: Optional name to use if a new `Tensor` is created.\n\n Returns:\n A `Tensor` based on `value`.\n\n Raises:\n TypeError: If no conversion function is registered for `value` to `dtype`.\n RuntimeError: If a registered conversion function returns an invalid value.\n ValueError: If the `value` is a tensor not of given `dtype` in graph mode.\n "], ["tf.CriticalSection", "description: Critical section.", false, "Critical section.\n\n A `CriticalSection` object is a resource in the graph which executes subgraphs\n in **serial** order. A common example of a subgraph one may wish to run\n exclusively is the one given by the following function:\n\n ```python\n v = resource_variable_ops.ResourceVariable(0.0, name=\"v\")\n\n def count():\n value = v.read_value()\n with tf.control_dependencies([value]):\n with tf.control_dependencies([v.assign_add(1)]):\n return tf.identity(value)\n ```\n\n Here, a snapshot of `v` is captured in `value`; and then `v` is updated.\n The snapshot value is returned.\n\n If multiple workers or threads all execute `count` in parallel, there is no\n guarantee that access to the variable `v` is atomic at any point within\n any thread's calculation of `count`. In fact, even implementing an atomic\n counter that guarantees that the user will see each value `0, 1, ...,` is\n currently impossible.\n\n The solution is to ensure any access to the underlying resource `v` is\n only processed through a critical section:\n\n ```python\n cs = CriticalSection()\n f1 = cs.execute(count)\n f2 = cs.execute(count)\n output = f1 + f2\n session.run(output)\n ```\n The functions `f1` and `f2` will be executed serially, and updates to `v`\n will be atomic.\n\n **NOTES**\n\n All resource objects, including the critical section and any captured\n variables of functions executed on that critical section, will be\n colocated to the same device (host and cpu/gpu).\n\n When using multiple critical sections on the same resources, there is no\n guarantee of exclusive access to those resources. This behavior is disallowed\n by default (but see the kwarg `exclusive_resource_access`).\n\n For example, running the same function in two separate critical sections\n will not ensure serial execution:\n\n ```python\n v = tf.compat.v1.get_variable(\"v\", initializer=0.0, use_resource=True)\n def accumulate(up):\n x = v.read_value()\n with tf.control_dependencies([x]):\n with tf.control_dependencies([v.assign_add(up)]):\n return tf.identity(x)\n ex1 = CriticalSection().execute(\n accumulate, 1.0, exclusive_resource_access=False)\n ex2 = CriticalSection().execute(\n accumulate, 1.0, exclusive_resource_access=False)\n bad_sum = ex1 + ex2\n sess.run(v.initializer)\n sess.run(bad_sum) # May return 0.0\n ```\n "], ["tf.custom_gradient", "description: Decorator to define a function with a custom gradient.", false, "Decorator to define a function with a custom gradient.\n\n This decorator allows fine grained control over the gradients of a sequence\n for operations. 
This may be useful for multiple reasons, including providing\n a more efficient or numerically stable gradient for a sequence of operations.\n\n For example, consider the following function that commonly occurs in the\n computation of cross entropy and log likelihoods:\n\n ```python\n def log1pexp(x):\n return tf.math.log(1 + tf.exp(x))\n ```\n\n Due to numerical instability, the gradient of this function evaluated at x=100\n is NaN. For example:\n\n ```python\n x = tf.constant(100.)\n y = log1pexp(x)\n dy_dx = tf.gradients(y, x) # Will be NaN when evaluated.\n ```\n\n The gradient expression can be analytically simplified to provide numerical\n stability:\n\n ```python\n @tf.custom_gradient\n def log1pexp(x):\n e = tf.exp(x)\n def grad(upstream):\n return upstream * (1 - 1 / (1 + e))\n return tf.math.log(1 + e), grad\n ```\n\n With this definition, the gradient `dy_dx` at `x = 100` will be correctly\n evaluated as 1.0.\n\n The variable `upstream` is defined as the upstream gradient, i.e. the gradient\n from all the layers or functions originating from this layer. The above\n example has no upstream functions, therefore `upstream = dy/dy = 1.0`.\n\n Assume that `x_i` is `log1pexp` in the forward pass `x_1 = x_1(x_0)`,\n `x_2 = x_2(x_1)`, ..., `x_i = x_i(x_i-1)`, ..., `x_n = x_n(x_n-1)`. By\n chain rule we know that `dx_n/dx_0 = dx_n/dx_n-1 * dx_n-1/dx_n-2 * ... *\n dx_i/dx_i-1 * ... * dx_1/dx_0`.\n\n In this case the gradient of our current function is defined as\n `dx_i/dx_i-1 = (1 - 1 / (1 + e))`. The upstream gradient `upstream` would be\n `dx_n/dx_n-1 * dx_n-1/dx_n-2 * ... * dx_i+1/dx_i`. The upstream gradient\n multiplied by the current gradient is then passed downstream.\n\n In case the function takes multiple variables as input, the `grad`\n function must also return the same number of variables.\n We take the function `z = x * y` as an example.\n\n >>> @tf.custom_gradient\n ... def bar(x, y):\n ... def grad(upstream):\n ... dz_dx = y\n ... dz_dy = x\n ... return upstream * dz_dx, upstream * dz_dy\n ... z = x * y\n ... return z, grad\n >>> x = tf.constant(2.0, dtype=tf.float32)\n >>> y = tf.constant(3.0, dtype=tf.float32)\n >>> with tf.GradientTape(persistent=True) as tape:\n ... tape.watch(x)\n ... tape.watch(y)\n ... z = bar(x, y)\n >>> z\n <tf.Tensor: shape=(), dtype=float32, numpy=6.0>\n >>> tape.gradient(z, x)\n <tf.Tensor: shape=(), dtype=float32, numpy=3.0>\n >>> tape.gradient(z, y)\n <tf.Tensor: shape=(), dtype=float32, numpy=2.0>\n\n Nesting custom gradients can lead to unintuitive results. The default\n behavior does not correspond to n-th order derivatives. For example\n\n ```python\n @tf.custom_gradient\n def op(x):\n y = op1(x)\n @tf.custom_gradient\n def grad_fn(dy):\n gdy = op2(x, y, dy)\n def grad_grad_fn(ddy): # Not the 2nd order gradient of op w.r.t.
x.\n return op3(x, y, dy, ddy)\n return gdy, grad_grad_fn\n return y, grad_fn\n ```\n\n The function `grad_grad_fn` will be calculating the first order gradient\n of `grad_fn` with respect to `dy`, which is used to generate forward-mode\n gradient graphs from backward-mode gradient graphs, but is not the same as\n the second order gradient of `op` with respect to `x`.\n\n Instead, wrap nested `@tf.custom_gradients` in another function:\n\n ```python\n @tf.custom_gradient\n def op_with_fused_backprop(x):\n y, x_grad = fused_op(x)\n def first_order_gradient(dy):\n @tf.custom_gradient\n def first_order_custom(unused_x):\n def second_order_and_transpose(ddy):\n return second_order_for_x(...), gradient_wrt_dy(...)\n return x_grad, second_order_and_transpose\n return dy * first_order_custom(x)\n return y, first_order_gradient\n ```\n\n Additional arguments to the inner `@tf.custom_gradient`-decorated function\n control the expected return values of the innermost function.\n\n The examples above illustrate how to specify custom gradients for functions\n which do not read from variables. The following example uses variables, which\n require special handling because they are effectively inputs of the forward\n function.\n\n >>> weights = tf.Variable(tf.ones([2])) # Trainable variable weights\n >>> @tf.custom_gradient\n ... def linear_poly(x):\n ... # Creating polynomial\n ... poly = weights[1] * x + weights[0]\n ...\n ... def grad_fn(dpoly, variables):\n ... # dy/dx = weights[1] and we need to left multiply dpoly\n ... grad_xs = dpoly * weights[1] # Scalar gradient\n ...\n ... grad_vars = [] # To store gradients of passed variables\n ... assert variables is not None\n ... assert len(variables) == 1\n ... assert variables[0] is weights\n ... # Manually computing dy/dweights\n ... dy_dw = dpoly * tf.stack([x ** 1, x ** 0])\n ... grad_vars.append(\n ... tf.reduce_sum(tf.reshape(dy_dw, [2, -1]), axis=1)\n ... )\n ... return grad_xs, grad_vars\n ... return poly, grad_fn\n >>> x = tf.constant([1., 2., 3.])\n >>> with tf.GradientTape(persistent=True) as tape:\n ... tape.watch(x)\n ... poly = linear_poly(x)\n >>> poly # poly = x + 1\n <tf.Tensor: shape=(3,), dtype=float32, numpy=array([2., 3., 4.], dtype=float32)>\n >>> tape.gradient(poly, x) # conventional scalar gradient dy/dx\n <tf.Tensor: shape=(3,), dtype=float32, numpy=array([1., 1., 1.], dtype=float32)>\n >>> tape.gradient(poly, weights)\n <tf.Tensor: shape=(2,), dtype=float32, numpy=array([6., 3.], dtype=float32)>\n\n The above example illustrates the usage of the trainable variable `weights`.\n In the example, the inner `grad_fn` accepts an extra `variables` input\n parameter and also returns an extra `grad_vars` output. That extra argument\n is passed if the forward function reads any variables. You need to\n compute the gradient w.r.t. each of those `variables` and output it as a list\n of `grad_vars`. Note here that the default value of `variables` is `None`\n when no variables are used in the forward function.\n\n It should be noted `tf.GradientTape` is still watching the forward pass of a\n `tf.custom_gradient`, and will use the ops it watches. As a consequence,\n calling `tf.function` while the tape is still watching leads\n to a gradient graph being built. If an op is used in `tf.function` without a\n registered gradient, a `LookupError` will be raised.\n\n Users can insert `tf.stop_gradient` to customize this behavior. This\n is demonstrated in the example below. `tf.random.shuffle` does not have a\n registered gradient.
As a result `tf.stop_gradient` is used to avoid the\n `LookupError`.\n\n ```python\n x = tf.constant([0.3, 0.5], dtype=tf.float32)\n\n @tf.custom_gradient\n def test_func_with_stop_grad(x):\n @tf.function\n def _inner_func():\n # Avoid exception during the forward pass\n return tf.stop_gradient(tf.random.shuffle(x))\n # return tf.random.shuffle(x) # This will raise\n\n res = _inner_func()\n def grad(upstream):\n return upstream # Arbitrarily defined custom gradient\n return res, grad\n\n with tf.GradientTape() as g:\n g.watch(x)\n res = test_func_with_stop_grad(x)\n\n g.gradient(res, x)\n ```\n\n See also `tf.RegisterGradient` which registers a gradient function for a\n primitive TensorFlow operation. `tf.custom_gradient` on the other hand allows\n for fine grained control over the gradient computation of a sequence of\n operations.\n\n Note that if the decorated function uses `Variable`s, the enclosing variable\n scope must be using `ResourceVariable`s.\n\n Args:\n f: function `f(*x)` that returns a tuple `(y, grad_fn)` where:\n - `x` is a sequence of (nested structures of) `Tensor` inputs to the\n function.\n - `y` is a (nested structure of) `Tensor` outputs of applying TensorFlow\n operations in `f` to `x`.\n - `grad_fn` is a function with the signature `g(*grad_ys)` which returns\n a list of `Tensor`s the same size as (flattened) `x` - the derivatives\n of `Tensor`s in `y` with respect to the `Tensor`s in `x`. `grad_ys` is\n a sequence of `Tensor`s the same size as (flattened) `y` holding the\n initial value gradients for each `Tensor` in `y`.\n\n In a pure mathematical sense, a vector-argument vector-valued function\n `f`'s derivatives should be its Jacobian matrix `J`. Here we are\n expressing the Jacobian `J` as a function `grad_fn` which defines how\n `J` will transform a vector `grad_ys` when left-multiplied with it\n (`grad_ys * J`, the vector-Jacobian product, or VJP). This functional\n representation of a matrix is convenient to use for chain-rule\n calculation (in e.g. the back-propagation algorithm).\n\n If `f` uses `Variable`s (that are not part of the\n inputs), i.e. through `get_variable`, then `grad_fn` should have\n signature `g(*grad_ys, variables=None)`, where `variables` is a list of\n the `Variable`s, and return a 2-tuple `(grad_xs, grad_vars)`, where\n `grad_xs` is the same as above, and `grad_vars` is a `list`\n with the derivatives of `Tensor`s in `y` with respect to the variables\n (that is, grad_vars has one Tensor per variable in variables).\n\n Returns:\n A function `h(x)` which returns the same value as `f(x)[0]` and whose\n gradient (as calculated by `tf.gradients`) is determined by `f(x)[1]`.\n "], ["tf.data", "description: tf.data.Dataset API for input pipelines.", true, "`tf.data.Dataset` API for input pipelines.\n\nSee [Importing Data](https://tensorflow.org/guide/data) for an overview.\n\n"], ["tf.debugging", "description: Public API for tf.debugging namespace.", true, "Public API for tf.debugging namespace.\n"], ["tf.device", "description: Specifies the device for ops created/executed in this context.", false, "Specifies the device for ops created/executed in this context.\n\n This function specifies the device to be used for ops created/executed in a\n particular context. Nested contexts will inherit and also create/execute\n their ops on the specified device. If a specific device is not required,\n consider not using this function so that a device can be automatically\n assigned. In general the use of this function is optional. 
`device_name` can\n be fully specified, as in \"/job:worker/task:1/device:cpu:0\", or partially\n specified, containing only a subset of the \"/\"-separated fields. Any fields\n which are specified will override device annotations from outer scopes.\n\n For example:\n\n ```python\n with tf.device('/job:foo'):\n # ops created here have devices with /job:foo\n with tf.device('/job:bar/task:0/device:gpu:2'):\n # ops created here have the fully specified device above\n with tf.device('/device:gpu:1'):\n # ops created here have the device '/job:foo/device:gpu:1'\n ```\n\n Args:\n device_name: The device name to use in the context.\n\n Returns:\n A context manager that specifies the default device to use for newly\n created ops.\n\n Raises:\n RuntimeError: If a function is passed in.\n "], ["tf.DeviceSpec", "description: Represents a (possibly partial) specification for a TensorFlow device.", false, "Represents a (possibly partial) specification for a TensorFlow device.\n\n `DeviceSpec`s are used throughout TensorFlow to describe where state is stored\n and computations occur. Using `DeviceSpec` allows you to parse device spec\n strings to verify their validity, merge them or compose them programmatically.\n\n Example:\n\n ```python\n # Place the operations on device \"GPU:0\" in the \"ps\" job.\n device_spec = DeviceSpec(job=\"ps\", device_type=\"GPU\", device_index=0)\n with tf.device(device_spec.to_string()):\n # Both my_var and squared_var will be placed on /job:ps/device:GPU:0.\n my_var = tf.Variable(..., name=\"my_variable\")\n squared_var = tf.square(my_var)\n ```\n\n With eager execution disabled (by default in TensorFlow 1.x and by calling\n disable_eager_execution() in TensorFlow 2.x), the following syntax\n can be used:\n\n ```python\n tf.compat.v1.disable_eager_execution()\n\n # Same as previous\n device_spec = DeviceSpec(job=\"ps\", device_type=\"GPU\", device_index=0)\n # No need of .to_string() method.\n with tf.device(device_spec):\n my_var = tf.Variable(..., name=\"my_variable\")\n squared_var = tf.square(my_var)\n ```\n\n If a `DeviceSpec` is partially specified, it will be merged with other\n `DeviceSpec`s according to the scope in which it is defined. `DeviceSpec`\n components defined in inner scopes take precedence over those defined in\n outer scopes.\n\n ```python\n gpu0_spec = DeviceSpec(job=\"ps\", device_type=\"GPU\", device_index=0)\n with tf.device(DeviceSpec(job=\"train\").to_string()):\n with tf.device(gpu0_spec.to_string()):\n # Nodes created here will be assigned to /job:ps/device:GPU:0.\n with tf.device(DeviceSpec(device_type=\"GPU\", device_index=1).to_string()):\n # Nodes created here will be assigned to /job:train/device:GPU:1.\n ```\n\n A `DeviceSpec` consists of 5 components -- each of\n which is optionally specified:\n\n * Job: The job name.\n * Replica: The replica index.\n * Task: The task index.\n * Device type: The device type string (e.g. \"CPU\" or \"GPU\").\n * Device index: The device index.\n "], ["tf.distribute", "description: Library for running a computation across multiple devices.", true, "Library for running a computation across multiple devices.\n\nThe intent of this library is that you can write an algorithm in a stylized way\nand it will be usable with a variety of different `tf.distribute.Strategy`\nimplementations. Each descendant will implement a different strategy for\ndistributing the algorithm across multiple devices/machines. 
Furthermore, these\nchanges can be hidden inside the specific layers and other library classes that\nneed special treatment to run in a distributed setting, so that most users'\nmodel definition code can run unchanged. The `tf.distribute.Strategy` API works\nthe same way with eager and graph execution.\n\n*Guides*\n\n* [TensorFlow v2.x](https://www.tensorflow.org/guide/distributed_training)\n* [TensorFlow v1.x](https://github.com/tensorflow/docs/blob/master/site/en/r1/guide/distribute_strategy.ipynb)\n\n*Tutorials*\n\n* [Distributed Training Tutorials](https://www.tensorflow.org/tutorials/distribute/)\n\n  The tutorials cover how to use `tf.distribute.Strategy` to do distributed\n  training with native Keras APIs, custom training loops,\n  and Estimator APIs. They also cover how to save/load models when using\n  `tf.distribute.Strategy`.\n\n*Glossary*\n\n* _Data parallelism_ is where we run multiple copies of the model\n  on different slices of the input data. This is in contrast to\n  _model parallelism_ where we divide up a single copy of a model\n  across multiple devices.\n  Note: we only support data parallelism for now, but\n  hope to add support for model parallelism in the future.\n* A _device_ is a CPU or accelerator (e.g. GPUs, TPUs) on some machine that\n  TensorFlow can run operations on (see e.g. `tf.device`). You may have multiple\n  devices on a single machine, or be connected to devices on multiple\n  machines. Devices used to run computations are called _worker devices_.\n  Devices used to store variables are _parameter devices_. For some strategies,\n  such as `tf.distribute.MirroredStrategy`, the worker and parameter devices\n  will be the same (see mirrored variables below). For others they will be\n  different. For example, `tf.distribute.experimental.CentralStorageStrategy`\n  puts the variables on a single device (which may be a worker device or may be\n  the CPU), and `tf.distribute.experimental.ParameterServerStrategy` puts the\n  variables on separate machines called _parameter servers_ (see below).\n* A _replica_ is one copy of the model, running on one slice of the\n  input data. Right now each replica is executed on its own\n  worker device, but once we add support for model parallelism\n  a replica may span multiple worker devices.\n* A _host_ is the CPU device on a machine with worker devices, typically\n  used for running input pipelines.\n* A _worker_ is defined to be the physical machine(s) containing the physical\n  devices (e.g. GPUs, TPUs) on which the replicated computation is executed. A\n  worker may contain one or more replicas, but contains at least one\n  replica. Typically one worker will correspond to one machine, but in the case\n  of very large models with model parallelism, one worker may span multiple\n  machines. We typically run one input pipeline per worker, feeding all the\n  replicas on that worker.\n* _Synchronous_, or more commonly _sync_, training is where the updates from\n  each replica are aggregated together before updating the model variables. This\n  is in contrast to _asynchronous_, or _async_ training, where each replica\n  updates the model variables independently. You may also have replicas\n  partitioned into groups which are in sync within each group but async between\n  groups.\n* _Parameter servers_: These are machines that hold a single copy of\n  parameters/variables, used by some strategies (right now just\n  `tf.distribute.experimental.ParameterServerStrategy`). 
All replicas that want\n  to operate on a variable retrieve it at the beginning of a step and send an\n  update to be applied at the end of the step. These can in principle support\n  either sync or async training, but right now we only have support for async\n  training with parameter servers. Compare to\n  `tf.distribute.experimental.CentralStorageStrategy`, which puts all variables\n  on a single device on the same machine (and does sync training), and\n  `tf.distribute.MirroredStrategy`, which mirrors variables to multiple devices\n  (see below).\n\n* _Replica context_ vs. _Cross-replica context_ vs. _Update context_\n\n  A _replica context_ applies\n  when you execute the computation function that was called with `strategy.run`.\n  Conceptually, you're in replica context when executing the computation\n  function that is being replicated.\n\n  An _update context_ is entered in a `tf.distribute.StrategyExtended.update`\n  call.\n\n  A _cross-replica context_ is entered when you enter a `strategy.scope`. This\n  is useful for calling `tf.distribute.Strategy` methods which operate across\n  the replicas (like `reduce_to()`). By default you start in a _replica context_\n  (the \"default single _replica context_\") and then some methods can switch you\n  back and forth.\n\n* _Distributed value_: A distributed value is represented by the base class\n  `tf.distribute.DistributedValues`. `tf.distribute.DistributedValues` is useful\n  to represent values on multiple devices, and it contains a map from replica id\n  to values. Two representative kinds of `tf.distribute.DistributedValues` are\n  \"PerReplica\" and \"Mirrored\" values.\n\n  \"PerReplica\" values exist on the worker\n  devices, with a different value for each replica. They are produced by\n  iterating through a distributed dataset returned by\n  `tf.distribute.Strategy.experimental_distribute_dataset` and\n  `tf.distribute.Strategy.distribute_datasets_from_function`. They\n  are also the typical result returned by\n  `tf.distribute.Strategy.run`.\n\n  \"Mirrored\" values are like \"PerReplica\" values, except we know that the value\n  on all replicas is the same. We can safely read a \"Mirrored\" value in a\n  cross-replica context by using the value on any replica.\n\n* _Unwrapping_ and _merging_: Consider calling a function `fn` on multiple\n  replicas, like `strategy.run(fn, args=[w])` with an\n  argument `w` that is a `tf.distribute.DistributedValues`. This means `w` will\n  have a map taking replica id `0` to `w0`, replica id `1` to `w1`, etc.\n  `strategy.run()` unwraps `w` before calling `fn`, so it calls `fn(w0)` on\n  device `d0`, `fn(w1)` on device `d1`, etc. It then merges the return\n  values from `fn()`, which leads to one common object if the returned values\n  are the same object from every replica, or a `DistributedValues` object\n  otherwise.\n\n* _Reductions_ and _all-reduce_: A _reduction_ is a method of aggregating\n  multiple values into one value, like \"sum\" or \"mean\". If a strategy is doing\n  sync training, we will perform a reduction on the gradients to a parameter\n  from all replicas before applying the update. _All-reduce_ is an algorithm for\n  performing a reduction on values from multiple devices and making the result\n  available on all of those devices.\n\n* _Mirrored variables_: These are variables that are created on multiple\n  devices, where we keep the variables in sync by applying the same\n  updates to every copy. 
Mirrored variables are created with\n  `tf.Variable(...synchronization=tf.VariableSynchronization.ON_WRITE...)`.\n  Normally they are only used in synchronous training.\n\n* _SyncOnRead variables_\n\n  _SyncOnRead variables_ are created by\n  `tf.Variable(...synchronization=tf.VariableSynchronization.ON_READ...)`, and\n  they are created on multiple devices. In replica context, each\n  component variable on the local replica can perform reads and writes without\n  synchronizing with the others. When the\n  _SyncOnRead variable_ is read in cross-replica context, the values from\n  component variables are aggregated and returned.\n\n  _SyncOnRead variables_ add a lot of configuration complexity to the\n  underlying logic, so we do not encourage users to instantiate and use\n  _SyncOnRead variables_ on their own. We have mainly used _SyncOnRead\n  variables_ for use cases such as batch norm and metrics. For performance\n  reasons, we often don't need to keep these statistics in sync every step and\n  they can be accumulated on each replica independently. The only time we want\n  to sync them is when reporting or checkpointing, which typically happens in\n  cross-replica context. _SyncOnRead variables_ are also often used by advanced\n  users who want to control when variable values are aggregated. For example,\n  users sometimes want to maintain gradients independently on each replica for a\n  couple of steps without aggregation.\n\n* _Distribute-aware layers_\n\n  Layers are generally called in a replica context, except when defining a\n  Keras functional model. `tf.distribute.in_cross_replica_context` will let you\n  determine which case you are in. If in a replica context,\n  the `tf.distribute.get_replica_context` function will return the default\n  replica context outside a strategy scope, `None` within a strategy scope, and\n  a `tf.distribute.ReplicaContext` object inside a strategy scope and within a\n  `tf.distribute.Strategy.run` function. The `ReplicaContext` object has an\n  `all_reduce` method for aggregating across all replicas.\n\n\nNote that we provide a default version of `tf.distribute.Strategy` that is\nused when no other strategy is in scope, and it provides the same API with\nreasonable default behavior.\n"], ["tf.dtypes", "description: Public API for tf.dtypes namespace.", true, "Public API for tf.dtypes namespace.\n"], ["tf.dynamic_partition", "description: Partitions data into num_partitions tensors using indices from partitions.", false, "Partitions `data` into `num_partitions` tensors using indices from `partitions`.\n\n  For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`\n  becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i`\n  are placed in `outputs[i]` in lexicographic order of `js`, and the first\n  dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.\n  In detail,\n\n  ```python\n  outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]\n\n  outputs[i] = pack([data[js, ...] for js if partitions[js] == i])\n  ```\n\n  `data.shape` must start with `partitions.shape`.\n\n  For example:\n\n  ```python\n  # Scalar partitions.\n  partitions = 1\n  num_partitions = 2\n  data = [10, 20]\n  outputs[0] = []  # Empty with shape [0, 2]\n  outputs[1] = [[10, 20]]\n\n  # Vector partitions.\n  partitions = [0, 0, 1, 1, 0]\n  num_partitions = 2\n  data = [10, 20, 30, 40, 50]\n  outputs[0] = [10, 20, 50]\n  outputs[1] = [30, 40]\n  ```\n\n  See `dynamic_stitch` for an example of how to merge partitions back.\n\n
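  For a quick runnable check, here is a minimal sketch of the vector example above (assuming TF 2.x eager execution):\n\n  ```python\n  import tensorflow as tf\n\n  data = tf.constant([10, 20, 30, 40, 50])\n  partitions = tf.constant([0, 0, 1, 1, 0])\n  # Each data[i] is routed to outputs[partitions[i]].\n  outputs = tf.dynamic_partition(data, partitions, num_partitions=2)\n  print(outputs[0].numpy())  # [10 20 50]\n  print(outputs[1].numpy())  # [30 40]\n  ```\n\n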
\n\n  Args:\n    data: A `Tensor`.\n    partitions: A `Tensor` of type `int32`.\n      Any shape. Indices in the range `[0, num_partitions)`.\n    num_partitions: An `int` that is `>= 1`.\n      The number of partitions to output.\n    name: A name for the operation (optional).\n\n  Returns:\n    A list of `num_partitions` `Tensor` objects with the same type as `data`.\n  "], ["tf.dynamic_stitch", "description: Interleave the values from the data tensors into a single tensor.", false, "Interleave the values from the `data` tensors into a single tensor.\n\n  Builds a merged tensor such that\n\n  ```python\n  merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]\n  ```\n\n  For example, if each `indices[m]` is scalar or vector, we have\n\n  ```python\n  # Scalar indices:\n  merged[indices[m], ...] = data[m][...]\n\n  # Vector indices:\n  merged[indices[m][i], ...] = data[m][i, ...]\n  ```\n\n  Each `data[i].shape` must start with the corresponding `indices[i].shape`,\n  and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we\n  must have `data[i].shape = indices[i].shape + constant`. In terms of this\n  `constant`, the output shape is\n\n      merged.shape = [max(indices) + 1] + constant\n\n  Values are merged in order, so if an index appears in both `indices[m][i]` and\n  `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the\n  merged result. If you do not need this guarantee, ParallelDynamicStitch might\n  perform better on some devices.\n\n  For example:\n\n  ```python\n  indices[0] = 6\n  indices[1] = [4, 1]\n  indices[2] = [[5, 2], [0, 3]]\n  data[0] = [61, 62]\n  data[1] = [[41, 42], [11, 12]]\n  data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]\n  merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],\n            [51, 52], [61, 62]]\n  ```\n\n  This method can be used to merge partitions created by `dynamic_partition`\n  as illustrated in the following example:\n\n  ```python\n  # Apply a function (increments x_i) to elements for which a certain condition\n  # applies (x_i != -1 in this example).\n  x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])\n  condition_mask = tf.not_equal(x, tf.constant(-1.))\n  partitioned_data = tf.dynamic_partition(\n      x, tf.cast(condition_mask, tf.int32), 2)\n  partitioned_data[1] = partitioned_data[1] + 1.0\n  condition_indices = tf.dynamic_partition(\n      tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)\n  x = tf.dynamic_stitch(condition_indices, partitioned_data)\n  # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain\n  # unchanged.\n  ```\n\n
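  A compact runnable sketch of the same idea (assuming TF 2.x eager execution):\n\n  ```python\n  import tensorflow as tf\n\n  # Interleave two tensors: even positions come from the first,\n  # odd positions from the second.\n  indices = [tf.constant([0, 2]), tf.constant([1, 3])]\n  data = [tf.constant([10, 30]), tf.constant([20, 40])]\n  merged = tf.dynamic_stitch(indices, data)\n  print(merged.numpy())  # [10 20 30 40]\n  ```\n\n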
\n\n  Args:\n    indices: A list of at least 1 `Tensor` objects with type `int32`.\n    data: A list with the same length as `indices` of `Tensor` objects with the same type.\n    name: A name for the operation (optional).\n\n  Returns:\n    A `Tensor`. Has the same type as `data`.\n  "], ["tf.edit_distance", "description: Computes the Levenshtein distance between sequences.", false, "Computes the Levenshtein distance between sequences.\n\n  This operation takes variable-length sequences (`hypothesis` and `truth`),\n  each provided as a `SparseTensor`, and computes the Levenshtein distance.\n  You can normalize the edit distance by length of `truth` by setting\n  `normalize` to true.\n\n  For example:\n\n  Given the following inputs,\n  * `hypothesis` is a `tf.SparseTensor` of shape `[2, 1, 1]`\n  * `truth` is a `tf.SparseTensor` of shape `[2, 2, 2]`\n\n  >>> hypothesis = tf.SparseTensor(\n  ...   [[0, 0, 0],\n  ...    [1, 0, 0]],\n  ...   [\"a\", \"b\"],\n  ...   (2, 1, 1))\n  >>> truth = tf.SparseTensor(\n  ...   [[0, 1, 0],\n  ...    [1, 0, 0],\n  ...    [1, 0, 1],\n  ...    [1, 1, 0]],\n  ...   [\"a\", \"b\", \"c\", \"a\"],\n  ...   (2, 2, 2))\n  >>> tf.edit_distance(hypothesis, truth, normalize=True)\n  <tf.Tensor: shape=(2, 2), dtype=float32, numpy=\n  array([[inf, 1. ],\n         [0.5, 1. ]], dtype=float32)>\n\n  The operation returns a dense Tensor of shape `[2, 2]` with\n  edit distances normalized by `truth` lengths.\n\n  **Note**: It is possible to calculate edit distance between two\n  sparse tensors with variable-length values. However, attempting to create\n  them while eager execution is enabled will result in a `ValueError`.\n\n  For the following inputs,\n\n  ```python\n  # 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:\n  #   (0,0) = [\"a\"]\n  #   (1,0) = [\"b\"]\n  hypothesis = tf.sparse.SparseTensor(\n      [[0, 0, 0],\n       [1, 0, 0]],\n      [\"a\", \"b\"],\n      (2, 1, 1))\n\n  # 'truth' is a tensor of shape `[2, 2]` with variable-length values:\n  #   (0,0) = []\n  #   (0,1) = [\"a\"]\n  #   (1,0) = [\"b\", \"c\"]\n  #   (1,1) = [\"a\"]\n  truth = tf.sparse.SparseTensor(\n      [[0, 1, 0],\n       [1, 0, 0],\n       [1, 0, 1],\n       [1, 1, 0]],\n      [\"a\", \"b\", \"c\", \"a\"],\n      (2, 2, 2))\n\n  normalize = True\n\n  # The output would be a dense Tensor of shape `[2, 2]`, with edit distances\n  # normalized by 'truth' lengths.\n  # output => array([[inf, 1.], [0.5, 1.]], dtype=float32)\n  ```\n\n  Args:\n    hypothesis: A `SparseTensor` containing hypothesis sequences.\n    truth: A `SparseTensor` containing truth sequences.\n    normalize: A `bool`. If `True`, normalizes the Levenshtein distance by\n      length of `truth`.\n    name: A name for the operation (optional).\n\n  Returns:\n    A dense `Tensor` with rank `R - 1`, where R is the rank of the\n    `SparseTensor` inputs `hypothesis` and `truth`.\n\n  Raises:\n    TypeError: If either `hypothesis` or `truth` is not a `SparseTensor`.\n  "], ["tf.einsum", "description: Tensor contraction over specified indices and outer product.", false, "Tensor contraction over specified indices and outer product.\n\n  Einsum allows defining Tensors by defining their element-wise computation.\n  This computation is defined by `equation`, a shorthand form based on Einstein\n  summation. As an example, consider multiplying two matrices A and B to form a\n  matrix C. The elements of C are given by:\n\n  $$ C_{i,k} = \sum_j A_{i,j} B_{j,k} $$\n\n  or\n\n  ```\n  C[i,k] = sum_j A[i,j] * B[j,k]\n  ```\n\n  The corresponding einsum `equation` is:\n\n  ```\n  ij,jk->ik\n  ```\n\n  In general, to convert the element-wise equation into the `equation` string,\n  use the following procedure (intermediate strings for matrix multiplication\n  example provided in parentheses):\n\n  1. 
remove variable names, brackets, and commas, (`ik = sum_j ij * jk`)\n  2. replace \"*\" with \",\", (`ik = sum_j ij , jk`)\n  3. drop summation signs, and (`ik = ij, jk`)\n  4. move the output to the right, while replacing \"=\" with \"->\". (`ij,jk->ik`)\n\n  Note: If the output indices are not specified, repeated indices are summed.\n  So `ij,jk->ik` can be simplified to `ij,jk`.\n\n  Many common operations can be expressed in this way. For example:\n\n  **Matrix multiplication**\n\n  >>> m0 = tf.random.normal(shape=[2, 3])\n  >>> m1 = tf.random.normal(shape=[3, 5])\n  >>> e = tf.einsum('ij,jk->ik', m0, m1)\n  >>> # output[i,k] = sum_j m0[i,j] * m1[j, k]\n  >>> print(e.shape)\n  (2, 5)\n\n  Repeated indices are summed if the output indices are not specified.\n\n  >>> e = tf.einsum('ij,jk', m0, m1)  # output[i,k] = sum_j m0[i,j] * m1[j, k]\n  >>> print(e.shape)\n  (2, 5)\n\n\n  **Dot product**\n\n  >>> u = tf.random.normal(shape=[5])\n  >>> v = tf.random.normal(shape=[5])\n  >>> e = tf.einsum('i,i->', u, v)  # output = sum_i u[i]*v[i]\n  >>> print(e.shape)\n  ()\n\n  **Outer product**\n\n  >>> u = tf.random.normal(shape=[3])\n  >>> v = tf.random.normal(shape=[5])\n  >>> e = tf.einsum('i,j->ij', u, v)  # output[i,j] = u[i]*v[j]\n  >>> print(e.shape)\n  (3, 5)\n\n  **Transpose**\n\n  >>> m = tf.ones([2, 3])\n  >>> e = tf.einsum('ij->ji', m)  # output[j,i] = m[i,j]\n  >>> print(e.shape)\n  (3, 2)\n\n  **Diag**\n\n  >>> m = tf.reshape(tf.range(9), [3,3])\n  >>> diag = tf.einsum('ii->i', m)\n  >>> print(diag.shape)\n  (3,)\n\n  **Trace**\n\n  >>> # Repeated indices are summed.\n  >>> trace = tf.einsum('ii', m)  # output = trace(m) = sum_i m[i, i]\n  >>> assert trace == sum(diag)\n  >>> print(trace.shape)\n  ()\n\n  **Batch matrix multiplication**\n\n  >>> s = tf.random.normal(shape=[7,5,3])\n  >>> t = tf.random.normal(shape=[7,3,2])\n  >>> e = tf.einsum('bij,bjk->bik', s, t)\n  >>> # output[a,i,k] = sum_j s[a,i,j] * t[a, j, k]\n  >>> print(e.shape)\n  (7, 5, 2)\n\n  This method does not support broadcasting on named axes. All axes with\n  matching labels should have the same length. If you have length-1 axes,\n  use `tf.squeeze` or `tf.reshape` to eliminate them.\n\n  To write code that is agnostic to the number of indices in the input\n  use an ellipsis. The ellipsis is a placeholder for \"whatever other indices\n  fit here\".\n\n  For example, to perform a NumPy-style broadcasting-batch-matrix multiplication\n  where the matrix multiply acts on the last two axes of the input, use:\n\n  >>> s = tf.random.normal(shape=[11, 7, 5, 3])\n  >>> t = tf.random.normal(shape=[11, 7, 3, 2])\n  >>> e = tf.einsum('...ij,...jk->...ik', s, t)\n  >>> print(e.shape)\n  (11, 7, 5, 2)\n\n  Einsum **will** broadcast over axes covered by the ellipsis.\n\n  >>> s = tf.random.normal(shape=[11, 1, 5, 3])\n  >>> t = tf.random.normal(shape=[1, 7, 3, 2])\n  >>> e = tf.einsum('...ij,...jk->...ik', s, t)\n  >>> print(e.shape)\n  (11, 7, 5, 2)\n\n  Args:\n    equation: a `str` describing the contraction, in the same format as\n      `numpy.einsum`.\n    *inputs: the inputs to contract (each one a `Tensor`), whose shapes should\n      be consistent with `equation`.\n    **kwargs:\n      - optimize: Optimization strategy to use to find contraction path using\n        opt_einsum. Must be 'greedy', 'optimal', 'branch-2', 'branch-all' or\n        'auto'. 
(optional, default: 'greedy').\n      - name: A name for the operation (optional).\n\n  Returns:\n    The contracted `Tensor`, with shape determined by `equation`.\n\n  Raises:\n    ValueError: If\n      - the format of `equation` is incorrect,\n      - number of inputs or their shapes are inconsistent with `equation`.\n  "], ["tf.ensure_shape", "description: Updates the shape of a tensor and checks at runtime that the shape holds.", false, "Updates the shape of a tensor and checks at runtime that the shape holds.\n\n  When executed, this operation asserts that the input tensor `x`'s shape\n  is compatible with the `shape` argument.\n  See `tf.TensorShape.is_compatible_with` for details.\n\n  >>> x = tf.constant([[1, 2, 3],\n  ...                  [4, 5, 6]])\n  >>> x = tf.ensure_shape(x, [2, 3])\n\n  Use `None` for unknown dimensions:\n\n  >>> x = tf.ensure_shape(x, [None, 3])\n  >>> x = tf.ensure_shape(x, [2, None])\n\n  If the tensor's shape is not compatible with the `shape` argument, an error\n  is raised:\n\n  >>> x = tf.ensure_shape(x, [5])\n  Traceback (most recent call last):\n  ...\n  tf.errors.InvalidArgumentError: Shape of tensor dummy_input [2,3] is not\n    compatible with expected shape [5]. [Op:EnsureShape]\n\n  During graph construction (typically tracing a `tf.function`),\n  `tf.ensure_shape` updates the static-shape of the **result** tensor by\n  merging the two shapes. See `tf.TensorShape.merge_with` for details.\n\n  This is most useful when **you** know a shape that can't be determined\n  statically by TensorFlow.\n\n  The following trivial `tf.function` prints the input tensor's\n  static-shape before and after `ensure_shape` is applied.\n\n  >>> @tf.function\n  ... def f(tensor):\n  ...   print(\"Static-shape before:\", tensor.shape)\n  ...   tensor = tf.ensure_shape(tensor, [None, 3])\n  ...   print(\"Static-shape after:\", tensor.shape)\n  ...   return tensor\n\n  This lets you see the effect of `tf.ensure_shape` when the function is traced:\n\n  >>> cf = f.get_concrete_function(tf.TensorSpec([None, None]))\n  Static-shape before: (None, None)\n  Static-shape after: (None, 3)\n\n  >>> cf(tf.zeros([3, 3])) # Passes\n  >>> cf(tf.constant([1, 2, 3])) # fails\n  Traceback (most recent call last):\n  ...\n  InvalidArgumentError: Shape of tensor x [3] is not compatible with expected shape [3,3].\n\n  The above example raises `tf.errors.InvalidArgumentError`, because `x`'s\n  shape, `(3,)`, is not compatible with the `shape` argument, `(None, 3)`.\n\n  Inside a `tf.function` or `v1.Graph` context it checks both the buildtime and\n  runtime shapes. This is stricter than `tf.Tensor.set_shape` which only\n  checks the buildtime shape.\n\n  Note: This differs from `tf.Tensor.set_shape` in that it sets the static shape\n  of the resulting tensor and enforces it at runtime, raising an error if the\n  tensor's runtime shape is incompatible with the specified shape.\n  `tf.Tensor.set_shape` sets the static shape of the tensor without enforcing it\n  at runtime, which may result in inconsistencies between the statically-known\n  shape of tensors and the runtime value of tensors.\n\n  For example, when loading images of a known size:\n\n  >>> @tf.function\n  ... def decode_image(png):\n  ...   image = tf.image.decode_png(png, channels=3)\n  ...   # the `print` executes during tracing.\n  ...   print(\"Initial shape: \", image.shape)\n  ...   image = tf.ensure_shape(image, [28, 28, 3])\n  ...   print(\"Final shape: \", image.shape)\n  ...   
return image\n\n  When tracing a function, no ops are executed, so shapes may be unknown.\n  See the [Concrete Functions Guide](https://www.tensorflow.org/guide/concrete_function)\n  for details.\n\n  >>> concrete_decode = decode_image.get_concrete_function(\n  ...     tf.TensorSpec([], dtype=tf.string))\n  Initial shape:  (None, None, 3)\n  Final shape:  (28, 28, 3)\n\n  >>> image = tf.random.uniform(maxval=255, shape=[28, 28, 3], dtype=tf.int32)\n  >>> image = tf.cast(image, tf.uint8)\n  >>> png = tf.image.encode_png(image)\n  >>> image2 = concrete_decode(png)\n  >>> print(image2.shape)\n  (28, 28, 3)\n\n  >>> image = tf.concat([image, image], axis=0)\n  >>> print(image.shape)\n  (56, 28, 3)\n  >>> png = tf.image.encode_png(image)\n  >>> image2 = concrete_decode(png)\n  Traceback (most recent call last):\n  ...\n  tf.errors.InvalidArgumentError: Shape of tensor DecodePng [56,28,3] is not\n    compatible with expected shape [28,28,3].\n\n  Caution: if you don't use the result of `tf.ensure_shape` the check may not\n  run.\n\n  >>> @tf.function\n  ... def bad_decode_image(png):\n  ...   image = tf.image.decode_png(png, channels=3)\n  ...   # the `print` executes during tracing.\n  ...   print(\"Initial shape: \", image.shape)\n  ...   # BAD: forgot to use the returned tensor.\n  ...   tf.ensure_shape(image, [28, 28, 3])\n  ...   print(\"Final shape: \", image.shape)\n  ...   return image\n\n  >>> image = bad_decode_image(png)\n  Initial shape:  (None, None, 3)\n  Final shape:  (None, None, 3)\n  >>> print(image.shape)\n  (56, 28, 3)\n\n  Args:\n    x: A `Tensor`.\n    shape: A `TensorShape` representing the shape of this tensor, a\n      `TensorShapeProto`, a list, a tuple, or None.\n    name: A name for this operation (optional). Defaults to \"EnsureShape\".\n\n  Returns:\n    A `Tensor`. Has the same type and contents as `x`.\n\n  Raises:\n    tf.errors.InvalidArgumentError: If `shape` is incompatible with the shape\n      of `x`.\n  "], ["tf.errors", "description: Exception types for TensorFlow errors.", true, "Exception types for TensorFlow errors.\n"], ["tf.estimator", "description: Estimator: High level tools for working with models.", true, null], ["tf.executing_eagerly", "description: Checks whether the current thread has eager execution enabled.", false, "Checks whether the current thread has eager execution enabled.\n\n  Eager execution is enabled by default and this API returns `True`\n  in most cases. However, this API might return `False` in the following use\n  cases.\n\n  *  Executing inside `tf.function`, unless under `tf.init_scope` or unless\n     `tf.config.run_functions_eagerly(True)` was previously called.\n  *  Executing inside a transformation function for `tf.data`.\n  *  `tf.compat.v1.disable_eager_execution()` is called.\n\n  General case:\n\n  >>> print(tf.executing_eagerly())\n  True\n\n  Inside `tf.function`:\n\n  >>> @tf.function\n  ... def fn():\n  ...   with tf.init_scope():\n  ...     print(tf.executing_eagerly())\n  ...   print(tf.executing_eagerly())\n  >>> fn()\n  True\n  False\n\n  Inside `tf.function` after `tf.config.run_functions_eagerly(True)` is called:\n\n  >>> tf.config.run_functions_eagerly(True)\n  >>> @tf.function\n  ... def fn():\n  ...   with tf.init_scope():\n  ...     print(tf.executing_eagerly())\n  ...   print(tf.executing_eagerly())\n  >>> fn()\n  True\n  True\n  >>> tf.config.run_functions_eagerly(False)\n\n  Inside a transformation function for `tf.data`:\n\n  >>> def data_fn(x):\n  ...   print(tf.executing_eagerly())\n  ...   
return x\n  >>> dataset = tf.data.Dataset.range(100)\n  >>> dataset = dataset.map(data_fn)\n  False\n\n  Returns:\n    `True` if the current thread has eager execution enabled.\n  "], ["tf.expand_dims", "description: Returns a tensor with a length 1 axis inserted at index axis.", false, "Returns a tensor with a length 1 axis inserted at index `axis`.\n\n  Given a tensor `input`, this operation inserts a dimension of length 1 at the\n  dimension index `axis` of `input`'s shape. The dimension index follows Python\n  indexing rules: It's zero-based; a negative index is counted backward\n  from the end.\n\n  This operation is useful to:\n\n  * Add an outer \"batch\" dimension to a single element.\n  * Align axes for broadcasting.\n  * Add an inner vector length axis to a tensor of scalars.\n\n  For example:\n\n  If you have a single image of shape `[height, width, channels]`:\n\n  >>> image = tf.zeros([10,10,3])\n\n  You can add an outer `batch` axis by passing `axis=0`:\n\n  >>> tf.expand_dims(image, axis=0).shape.as_list()\n  [1, 10, 10, 3]\n\n  The new axis location matches Python `list.insert(axis, 1)`:\n\n  >>> tf.expand_dims(image, axis=1).shape.as_list()\n  [10, 1, 10, 3]\n\n  Following standard Python indexing rules, a negative `axis` counts from the\n  end so `axis=-1` adds an innermost dimension:\n\n  >>> tf.expand_dims(image, -1).shape.as_list()\n  [10, 10, 3, 1]\n\n  This operation requires that `axis` is a valid index for `input.shape`,\n  following Python indexing rules:\n\n  ```\n  -1-tf.rank(input) <= axis <= tf.rank(input)\n  ```\n\n  This operation is related to:\n\n  * `tf.squeeze`, which removes dimensions of size 1.\n  * `tf.reshape`, which provides more flexible reshaping capability.\n  * `tf.sparse.expand_dims`, which provides this functionality for\n    `tf.SparseTensor`.\n\n  Args:\n    input: A `Tensor`.\n    axis: Integer specifying the dimension index at which to expand the\n      shape of `input`. Given an input of D dimensions, `axis` must be in range\n      `[-(D+1), D]` (inclusive).\n    name: Optional string. The name of the output `Tensor`.\n\n  Returns:\n    A tensor with the same data as `input`, with an additional dimension\n    inserted at the index specified by `axis`.\n\n  Raises:\n    TypeError: If `axis` is not specified.\n    InvalidArgumentError: If `axis` is out of range `[-(D+1), D]`.\n  "], ["tf.experimental", "description: Public API for tf.experimental namespace.", true, "Public API for tf.experimental namespace.\n"], ["tf.extract_volume_patches", "description: Extract patches from input and put them in the \"depth\" output dimension. 3D extension of extract_image_patches.", false, "Extract `patches` from `input` and put them in the `\"depth\"` output dimension. 3D extension of `extract_image_patches`.\n\n  Args:\n    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.\n      5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`.\n    ksizes: A list of `ints` that has length `>= 5`.\n      The size of the sliding window for each dimension of `input`.\n    strides: A list of `ints` that has length `>= 5`.\n      1-D of length 5. How far the centers of two consecutive patches are in\n      `input`. 
Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`.\n    padding: A `string` from: `\"SAME\", \"VALID\"`.\n      The type of padding algorithm to use.\n\n      The size-related attributes are specified as follows:\n\n      ```python\n      ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]\n      strides = [1, stride_planes, stride_rows, stride_cols, 1]\n      ```\n    name: A name for the operation (optional).\n\n  Returns:\n    A `Tensor`. Has the same type as `input`.\n  "], ["tf.eye", "description: Construct an identity matrix, or a batch of matrices.", false, "Construct an identity matrix, or a batch of matrices.\n\n  See also `tf.ones`, `tf.zeros`, `tf.fill`, `tf.one_hot`.\n\n  ```python\n  # Construct one identity matrix.\n  tf.eye(2)\n  ==> [[1., 0.],\n       [0., 1.]]\n\n  # Construct a batch of 3 identity matrices, each 2 x 2.\n  # batch_identity[i, :, :] is a 2 x 2 identity matrix, i = 0, 1, 2.\n  batch_identity = tf.eye(2, batch_shape=[3])\n\n  # Construct one 2 x 3 \"identity\" matrix\n  tf.eye(2, num_columns=3)\n  ==> [[ 1.,  0.,  0.],\n       [ 0.,  1.,  0.]]\n  ```\n\n  Args:\n    num_rows: Non-negative `int32` scalar `Tensor` giving the number of rows\n      in each batch matrix.\n    num_columns: Optional non-negative `int32` scalar `Tensor` giving the number\n      of columns in each batch matrix. Defaults to `num_rows`.\n    batch_shape: A list or tuple of Python integers or a 1-D `int32` `Tensor`.\n      If provided, the returned `Tensor` will have leading batch dimensions of\n      this shape.\n    dtype: The type of an element in the resulting `Tensor`.\n    name: A name for this `Op`. Defaults to \"eye\".\n\n  Returns:\n    A `Tensor` of shape `batch_shape + [num_rows, num_columns]`.\n  "], ["tf.feature_column", "description: Public API for tf.feature_column namespace.", true, "Public API for tf.feature_column namespace.\n"], ["tf.fill", "description: Creates a tensor filled with a scalar value.", false, "Creates a tensor filled with a scalar value.\n\n  See also `tf.ones`, `tf.zeros`, `tf.one_hot`, `tf.eye`.\n\n  This operation creates a tensor of shape `dims` and fills it with `value`.\n\n  For example:\n\n  >>> tf.fill([2, 3], 9)\n  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n  array([[9, 9, 9],\n         [9, 9, 9]], dtype=int32)>\n\n  `tf.fill` evaluates at graph runtime and supports dynamic shapes based on\n  other runtime `tf.Tensors`, unlike `tf.constant(value, shape=dims)`, which\n  embeds the value as a `Const` node.\n\n  Args:\n    dims: A 1-D sequence of non-negative numbers. Represents the shape of the\n      output `tf.Tensor`. Entries should be of type: `int32`, `int64`.\n    value: A value to fill the returned `tf.Tensor`.\n    name: Optional string. The name of the output `tf.Tensor`.\n\n  Returns:\n    A `tf.Tensor` with shape `dims` and the same dtype as `value`.\n\n  Raises:\n    InvalidArgumentError: `dims` contains negative entries.\n    NotFoundError: `dims` contains non-integer entries.\n\n  @compatibility(numpy)\n  Similar to `np.full`. In `numpy`, more parameters are supported. Passing a\n  number argument as the shape (`np.full(5, value)`) is valid in `numpy` for\n  specifying a 1-D shaped result, while TensorFlow does not support this syntax.\n  @end_compatibility\n  "], ["tf.fingerprint", "description: Generates fingerprint values.", false, "Generates fingerprint values.\n\n  Generates fingerprint values of `data`.\n\n  Fingerprint op considers the first dimension of `data` as the batch dimension,\n  and `output[i]` contains the fingerprint value generated from contents in\n  `data[i, ...]` for all `i`.\n\n  Fingerprint op writes fingerprint values as byte arrays. 
For example, the\n  default method `farmhash64` generates a 64-bit fingerprint value at a time.\n  This 8-byte value is written out as a `tf.uint8` array of size 8, in\n  little-endian order.\n\n  For example, suppose that `data` has data type `tf.int32` and shape (2, 3, 4),\n  and that the fingerprint method is `farmhash64`. In this case, the output\n  shape is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the\n  size of each fingerprint value in bytes. `output[0, :]` is generated from\n  12 integers in `data[0, :, :]` and similarly `output[1, :]` is generated from\n  the other 12 integers in `data[1, :, :]`.\n\n  Note that this op fingerprints the raw underlying buffer, and it does not\n  fingerprint the Tensor's metadata such as data type and/or shape. For example, the\n  fingerprint values are invariant under reshapes and bitcasts as long as the\n  batch dimension remains the same:\n\n  ```python\n  tf.fingerprint(data) == tf.fingerprint(tf.reshape(data, ...))\n  tf.fingerprint(data) == tf.fingerprint(tf.bitcast(data, ...))\n  ```\n\n  For string data, one should expect `tf.fingerprint(data) !=\n  tf.fingerprint(tf.strings.reduce_join(data))` in general.\n\n  Args:\n    data: A `Tensor`. Must have rank 1 or higher.\n    method: A `Tensor` of type `tf.string`. Fingerprint method used by this op.\n      Currently the only available method is `farmhash64`.\n    name: A name for the operation (optional).\n\n  Returns:\n    A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals\n    `data`'s first dimension, and the second dimension size depends on the\n    fingerprint algorithm.\n  "], ["tf.foldl", "description: foldl on the list of tensors unpacked from elems on dimension 0. (deprecated argument values)", false, "foldl on the list of tensors unpacked from `elems` on dimension 0. (deprecated argument values)\n\nDeprecated: SOME ARGUMENT VALUES ARE DEPRECATED: `(back_prop=False)`. They will be removed in a future version.\nInstructions for updating:\nback_prop=False is deprecated. Consider using tf.stop_gradient instead.\nInstead of:\nresults = tf.foldl(fn, elems, back_prop=False)\nUse:\nresults = tf.nest.map_structure(tf.stop_gradient, tf.foldl(fn, elems))\n\nThis foldl operator repeatedly applies the callable `fn` to a sequence\nof elements from first to last. The elements are made of the tensors\nunpacked from `elems` on dimension 0. The callable `fn` takes two tensors as\narguments. The first argument is the accumulated value computed from the\npreceding invocation of `fn`, and the second is the value at the current\nposition of `elems`. If `initializer` is None, `elems` must contain at least\none element, and its first element is used as the initializer.\n\nSuppose that `elems` is unpacked into `values`, a list of tensors. The shape\nof the result tensor is `fn(initializer, values[0]).shape`.\n\nThis method also allows multi-arity `elems` and output of `fn`. If `elems`\nis a (possibly nested) list or tuple of tensors, then each of these tensors\nmust have a matching first (unpack) dimension. The signature of `fn` may\nmatch the structure of `elems`. That is, if `elems` is\n`(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:\n`fn = lambda (t1, [t2, t3, [t4, t5]]):`.\n\nArgs:\n  fn: The callable to be applied.\n  elems: A tensor or (possibly nested) sequence of tensors, each of which will\n    be unpacked along their first dimension. 
The nested sequence of the\n    resulting slices will be the first argument to `fn`.\n  initializer: (optional) A tensor or (possibly nested) sequence of tensors,\n    as the initial value for the accumulator.\n  parallel_iterations: (optional) The number of iterations allowed to run in\n    parallel.\n  back_prop: (optional) Deprecated. False disables support for back\n    propagation. Prefer using `tf.stop_gradient` instead.\n  swap_memory: (optional) True enables GPU-CPU memory swapping.\n  name: (optional) Name prefix for the returned tensors.\n\nReturns:\n  A tensor or (possibly nested) sequence of tensors, resulting from applying\n  `fn` consecutively to the list of tensors unpacked from `elems`, from first\n  to last.\n\nRaises:\n  TypeError: if `fn` is not callable.\n\nExample:\n  ```python\n  elems = tf.constant([1, 2, 3, 4, 5, 6])\n  result = tf.foldl(lambda a, x: a + x, elems)\n  # result == 21\n  ```"], ["tf.foldr", "description: foldr on the list of tensors unpacked from elems on dimension 0. (deprecated argument values)", false, "foldr on the list of tensors unpacked from `elems` on dimension 0. (deprecated argument values)\n\nDeprecated: SOME ARGUMENT VALUES ARE DEPRECATED: `(back_prop=False)`. They will be removed in a future version.\nInstructions for updating:\nback_prop=False is deprecated. Consider using tf.stop_gradient instead.\nInstead of:\nresults = tf.foldr(fn, elems, back_prop=False)\nUse:\nresults = tf.nest.map_structure(tf.stop_gradient, tf.foldr(fn, elems))\n\nThis foldr operator repeatedly applies the callable `fn` to a sequence\nof elements from last to first. The elements are made of the tensors\nunpacked from `elems`. The callable `fn` takes two tensors as arguments.\nThe first argument is the accumulated value computed from the preceding\ninvocation of `fn`, and the second is the value at the current position of\n`elems`. If `initializer` is None, `elems` must contain at least one element,\nand its first element is used as the initializer.\n\nSuppose that `elems` is unpacked into `values`, a list of tensors. The shape\nof the result tensor is `fn(initializer, values[0]).shape`.\n\nThis method also allows multi-arity `elems` and output of `fn`. If `elems`\nis a (possibly nested) list or tuple of tensors, then each of these tensors\nmust have a matching first (unpack) dimension. The signature of `fn` may\nmatch the structure of `elems`. That is, if `elems` is\n`(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:\n`fn = lambda (t1, [t2, t3, [t4, t5]]):`.\n\nArgs:\n  fn: The callable to be applied.\n  elems: A tensor or (possibly nested) sequence of tensors, each of which will\n    be unpacked along their first dimension. The nested sequence of the\n    resulting slices will be the first argument to `fn`.\n  initializer: (optional) A tensor or (possibly nested) sequence of tensors,\n    as the initial value for the accumulator.\n  parallel_iterations: (optional) The number of iterations allowed to run in\n    parallel.\n  back_prop: (optional) Deprecated. False disables support for back\n    propagation. 
Prefer using `tf.stop_gradient` instead.\n  swap_memory: (optional) True enables GPU-CPU memory swapping.\n  name: (optional) Name prefix for the returned tensors.\n\nReturns:\n  A tensor or (possibly nested) sequence of tensors, resulting from applying\n  `fn` consecutively to the list of tensors unpacked from `elems`, from last\n  to first.\n\nRaises:\n  TypeError: if `fn` is not callable.\n\nExample:\n  ```python\n  elems = [1, 2, 3, 4, 5, 6]\n  result = tf.foldr(lambda a, x: a + x, elems)\n  # result == 21\n  ```"], ["tf.function", "description: Compiles a function into a callable TensorFlow graph. (deprecated arguments) (deprecated arguments)", false, "Compiles a function into a callable TensorFlow graph. (deprecated arguments) (deprecated arguments)\n\nDeprecated: SOME ARGUMENTS ARE DEPRECATED: `(experimental_compile)`. They will be removed in a future version.\nInstructions for updating:\nexperimental_compile is deprecated, use jit_compile instead\n\nDeprecated: SOME ARGUMENTS ARE DEPRECATED: `(experimental_relax_shapes)`. They will be removed in a future version.\nInstructions for updating:\nexperimental_relax_shapes is deprecated, use reduce_retracing instead\n\n`tf.function` constructs a `tf.types.experimental.GenericFunction` that\nexecutes a TensorFlow graph (`tf.Graph`) created by trace-compiling the\nTensorFlow operations in `func`. More information on the topic can be found\nin [Introduction to Graphs and tf.function]\n(https://www.tensorflow.org/guide/intro_to_graphs).\n\nSee [Better Performance with tf.function]\n(https://www.tensorflow.org/guide/function) for tips on performance and\nknown limitations.\n\nExample usage:\n\n>>> @tf.function\n... def f(x, y):\n...   return x ** 2 + y\n>>> x = tf.constant([2, 3])\n>>> y = tf.constant([3, -2])\n>>> f(x, y)\n\n\nThe trace-compilation allows non-TensorFlow operations to execute, but under\nspecial conditions. In general, only TensorFlow operations are guaranteed to\nrun and create fresh results whenever the `GenericFunction` is called.\n\n## Features\n\n`func` may use data-dependent Python control flow statements, including `if`,\n`for`, `while`, `break`, `continue` and `return`:\n\n>>> @tf.function\n... def f(x):\n...   if tf.reduce_sum(x) > 0:\n...     return x * x\n...   else:\n...     return -x // 2\n>>> f(tf.constant(-2))\n\n\n`func`'s closure may include `tf.Tensor` and `tf.Variable` objects:\n\n>>> @tf.function\n... def f():\n...   return x ** 2 + y\n>>> x = tf.constant([-2, -3])\n>>> y = tf.Variable([3, -2])\n>>> f()\n\n\n`func` may also use ops with side effects, such as `tf.print`, `tf.Variable`\nand others:\n\n>>> v = tf.Variable(1)\n>>> @tf.function\n... def f(x):\n...   for i in tf.range(x):\n...     v.assign_add(i)\n>>> f(3)\n>>> v\n\n\nImportant: Any Python side-effects (appending to a list, printing with\n`print`, etc.) will only happen once, when `func` is traced. To have\nside-effects executed as part of your `tf.function`, they need to be written\nas TF ops:\n\n>>> l = []\n>>> @tf.function\n... def f(x):\n...   for i in x:\n...     l.append(i + 1)    # Caution! Will only happen once when tracing\n>>> f(tf.constant([1, 2, 3]))\n>>> l\n[]\n\nInstead, use TensorFlow collections like `tf.TensorArray`:\n\n>>> @tf.function\n... def f(x):\n...   ta = tf.TensorArray(dtype=tf.int32, size=0, dynamic_size=True)\n...   for i in range(len(x)):\n...     ta = ta.write(i, x[i] + 1)\n...   
return ta.stack()\n>>> f(tf.constant([1, 2, 3]))\n\n\n## `tf.function` creates polymorphic callables\n\nInternally, `tf.types.experimental.GenericFunction` may contain multiple\n`tf.types.experimental.ConcreteFunction`s, each specialized to arguments with\ndifferent data types or shapes, since TensorFlow can perform more\noptimizations on graphs of specific shapes, dtypes and values of constant\narguments. `tf.function` treats any pure Python values as opaque objects (best\nthought of as compile-time constants), and builds a separate `tf.Graph` for\neach set of Python arguments that it encounters.\nFor more information, see the\n[tf.function guide](https://www.tensorflow.org/guide/function#rules_of_tracing).\n\nExecuting a `GenericFunction` will select and execute the appropriate\n`ConcreteFunction` based on the argument types and values.\n\nTo obtain an individual `ConcreteFunction`, use the\n`GenericFunction.get_concrete_function` method. It can be called with the\nsame arguments as `func` and returns a\n`tf.types.experimental.ConcreteFunction`. `ConcreteFunction`s are backed by a\nsingle `tf.Graph`:\n\n>>> @tf.function\n... def f(x):\n...   return x + 1\n>>> isinstance(f.get_concrete_function(1).graph, tf.Graph)\nTrue\n\n`ConcreteFunction`s can be executed just like `GenericFunction`s, but their\ninput is restricted to the types to which they're specialized.\n\n## Retracing\n\n`ConcreteFunctions` are built (traced) on the fly, as the `GenericFunction` is\ncalled with new TensorFlow types or shapes, or with new Python values as\narguments. When `GenericFunction` builds a new trace, it is said that `func`\nis retraced. Retracing is a frequent performance concern for `tf.function` as\nit can be considerably slower than executing a graph that's already been\ntraced. It is ideal to minimize the amount of retracing in your code.\n\nCaution: Passing Python scalars or lists as arguments to `tf.function` will\nusually retrace. To avoid this, pass numeric arguments as Tensors whenever\npossible:\n\n>>> @tf.function\n... def f(x):\n...   return tf.abs(x)\n>>> f1 = f.get_concrete_function(1)\n>>> f2 = f.get_concrete_function(2)  # Slow - compiles new graph\n>>> f1 is f2\nFalse\n>>> f1 = f.get_concrete_function(tf.constant(1))\n>>> f2 = f.get_concrete_function(tf.constant(2))  # Fast - reuses f1\n>>> f1 is f2\nTrue\n\nPython numerical arguments should only be used when they take few distinct\nvalues, such as hyperparameters like the number of layers in a neural network.\n\n## Input signatures\n\nFor Tensor arguments, `GenericFunction` creates a new `ConcreteFunction` for\nevery unique set of input shapes and datatypes. The example below creates two\nseparate `ConcreteFunction`s, each specialized to a different shape:\n\n>>> @tf.function\n... def f(x):\n...   return x + 1\n>>> vector = tf.constant([1.0, 1.0])\n>>> matrix = tf.constant([[3.0]])\n>>> f.get_concrete_function(vector) is f.get_concrete_function(matrix)\nFalse\n\nAn \"input signature\" can be optionally provided to `tf.function` to control\nthis process. The input signature specifies the shape and type of each\nTensor argument to the function using a `tf.TensorSpec` object. More general\nshapes can be used. This ensures only one `ConcreteFunction` is created, and\nrestricts the `GenericFunction` to the specified shapes and types. It is\nan effective way to limit retracing when Tensors have dynamic shapes.\n\n>>> @tf.function(\n...     input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])\n... def f(x):\n...   
return x + 1\n>>> vector = tf.constant([1.0, 1.0])\n>>> matrix = tf.constant([[3.0]])\n>>> f.get_concrete_function(vector) is f.get_concrete_function(matrix)\nTrue\n\n## Variables may only be created once\n\n`tf.function` only allows creating new `tf.Variable` objects when it is called\nfor the first time:\n\n>>> class MyModule(tf.Module):\n...   def __init__(self):\n...     self.v = None\n...\n...   @tf.function\n...   def __call__(self, x):\n...     if self.v is None:\n...       self.v = tf.Variable(tf.ones_like(x))\n...     return self.v * x\n\nIn general, it is recommended to create `tf.Variable`s outside of\n`tf.function`.\nIn simple cases, persisting state across `tf.function` boundaries may be\nimplemented using a pure functional style in which state is represented by\n`tf.Tensor`s passed as arguments and returned as return values.\n\nContrast the two styles below:\n\n>>> state = tf.Variable(1)\n>>> @tf.function\n... def f(x):\n...   state.assign_add(x)\n>>> f(tf.constant(2))  # Non-pure functional style\n>>> state\n\n\n>>> state = tf.constant(1)\n>>> @tf.function\n... def f(state, x):\n...   state += x\n...   return state\n>>> state = f(state, tf.constant(2))  # Pure functional style\n>>> state\n\n\n## Python operations execute only once per trace\n\n`func` may contain TensorFlow operations mixed with pure Python operations.\nHowever, when the function is executed, only the TensorFlow operations will\nrun. The Python operations run only once, at trace time. If TensorFlow\noperations depend on results from Python operations, those results will be\nfrozen into the graph.\n\n>>> @tf.function\n... def f(a, b):\n...   print('this runs at trace time; a is', a, 'and b is', b)\n...   return b\n>>> f(1, tf.constant(1))\nthis runs at trace time; a is 1 and b is Tensor(\"...\", shape=(), dtype=int32)\n\n\n>>> f(1, tf.constant(2))\n\n\n>>> f(2, tf.constant(1))\nthis runs at trace time; a is 2 and b is Tensor(\"...\", shape=(), dtype=int32)\n\n\n>>> f(2, tf.constant(2))\n\n\n## Using type annotations to improve performance\n\n`experimental_follow_type_hints` can be used along with type annotations to\nreduce retracing by automatically casting any Python values to `tf.Tensor`\n(something that is not done by default, unless you use input signatures).\n\n>>> @tf.function(experimental_follow_type_hints=True)\n... def f_with_hints(x: tf.Tensor):\n...   print('Tracing')\n...   return x\n>>> @tf.function(experimental_follow_type_hints=False)\n... def f_no_hints(x: tf.Tensor):\n...   print('Tracing')\n...   return x\n>>> f_no_hints(1)\nTracing\n\n>>> f_no_hints(2)\nTracing\n\n>>> f_with_hints(1)\nTracing\n\n>>> f_with_hints(2)\n\n\nArgs:\n  func: the function to be compiled. If `func` is None, `tf.function` returns\n    a decorator that can be invoked with a single argument - `func`. In other\n    words, `tf.function(input_signature=...)(func)` is equivalent to\n    `tf.function(func, input_signature=...)`. The former can be used as a\n    decorator.\n  input_signature: A possibly nested sequence of `tf.TensorSpec` objects\n    specifying the shapes and dtypes of the Tensors that will be supplied to\n    this function. If `None`, a separate function is instantiated for each\n    inferred input signature. If `input_signature` is specified, every input to\n    `func` must be a `Tensor`, and `func` cannot accept `**kwargs`.\n  autograph: Whether autograph should be applied on `func` before tracing a\n    graph. Data-dependent Python control flow statements require\n    `autograph=True`. 
For more information, see the\n    [tf.function and AutoGraph guide](\n    https://www.tensorflow.org/guide/function#autograph_transformations).\n  jit_compile: If `True`, compiles the function using\n    [XLA](https://tensorflow.org/xla). XLA performs compiler optimizations,\n    such as fusion, and attempts to emit more efficient code. This may\n    drastically improve performance. If set to `True`,\n    the whole function needs to be compilable by XLA, or an\n    `errors.InvalidArgumentError` is thrown.\n    If `None` (default), compiles the function with XLA when running on TPU\n    and goes through the regular function execution path when running on\n    other devices.\n    If `False`, executes the function without XLA compilation. Set this value\n    to `False` when directly running a multi-device function on TPUs (e.g. two\n    TPU cores, one TPU core and its host CPU).\n    Not all functions are compilable, see a list of\n    [sharp corners](https://tensorflow.org/xla/known_issues).\n  reduce_retracing: When True, `tf.function` attempts to reduce the\n    amount of retracing, for example by using more generic shapes. This\n    can be controlled for user objects by customizing their associated\n    `tf.types.experimental.TraceType`.\n  experimental_implements: If provided, contains a name of a \"known\" function\n    this implements. For example \"mycompany.my_recurrent_cell\".\n    This is stored as an attribute in the inference function,\n    which can then be detected when processing serialized functions.\n    See [standardizing composite ops](https://github.com/tensorflow/community/blob/master/rfcs/20190610-standardizing-composite_ops.md)\n    for details. For an example of utilizing this attribute see this\n    [example](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/mlir/lite/transforms/prepare_composite_functions_tf.cc).\n    That code automatically detects and substitutes functions that\n    implement \"embedded_matmul\" and allows TFLite to substitute its own\n    implementations. For instance, a TensorFlow user can use this\n    attribute to mark that their function also implements\n    `embedded_matmul` (perhaps more efficiently!)\n    by specifying it using this parameter:\n    `@tf.function(experimental_implements=\"embedded_matmul\")`.\n    This can either be specified as just the string name of the function or\n    a NameAttrList corresponding to a list of key-value attributes associated\n    with the function name. The name of the function will be in the 'name'\n    field of the NameAttrList. To define a formal TF op that this function\n    implements, try the experimental [composite TF](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/mlir/tfr)\n    project.\n  experimental_autograph_options: Optional tuple of\n    `tf.autograph.experimental.Feature` values.\n  experimental_relax_shapes: Deprecated. Use `reduce_retracing`\n    instead.\n  experimental_compile: Deprecated alias to 'jit_compile'.\n  experimental_follow_type_hints: When True, the function may use type\n    annotations from `func` to optimize the tracing performance. 
For example,\n    arguments annotated with `tf.Tensor` will automatically be converted\n    to a Tensor.\n\nReturns:\n   If `func` is not None, returns a `tf.types.experimental.GenericFunction`.\n   If `func` is None, returns a decorator that, when invoked with a single\n   `func` argument, returns a `tf.types.experimental.GenericFunction`.\n\nRaises:\n   `ValueError` when attempting to use `jit_compile=True`, but XLA support is\n   not available."], ["tf.gather", "description: Gather slices from params axis axis according to indices. (deprecated arguments)", false, "Gather slices from params axis `axis` according to indices. (deprecated arguments)\n\nDeprecated: SOME ARGUMENTS ARE DEPRECATED: `(validate_indices)`. They will be removed in a future version.\nInstructions for updating:\nThe `validate_indices` argument has no effect. Indices are always validated on CPU and never validated on GPU.\n\nGather slices from `params` axis `axis` according to `indices`. `indices`\nmust be an integer tensor of any dimension (often 1-D).\n\n`Tensor.__getitem__` works for scalars, `tf.newaxis`, and\n[python slices](https://numpy.org/doc/stable/reference/arrays.indexing.html#basic-slicing-and-indexing).\n\n`tf.gather` extends indexing to handle tensors of indices.\n\nIn the simplest case it's identical to scalar indexing:\n\n>>> params = tf.constant(['p0', 'p1', 'p2', 'p3', 'p4', 'p5'])\n>>> params[3].numpy()\nb'p3'\n>>> tf.gather(params, 3).numpy()\nb'p3'\n\nThe most common case is to pass a single-axis tensor of indices (this\ncan't be expressed as a python slice because the indices are not sequential):\n\n>>> indices = [2, 0, 2, 5]\n>>> tf.gather(params, indices).numpy()\narray([b'p2', b'p0', b'p2', b'p5'], dtype=object)\n\n
\n\nThe indices can have any shape. When the `params` has 1 axis, the\noutput shape is equal to the `indices` shape:\n\n>>> tf.gather(params, [[2, 0], [2, 5]]).numpy()\narray([[b'p2', b'p0'],\n       [b'p2', b'p5']], dtype=object)\n\nThe `params` may also have any shape. `gather` can select slices\nacross any axis depending on the `axis` argument (which defaults to 0).\nBelow it is used to gather first rows, then columns from a matrix:\n\n>>> params = tf.constant([[0, 1.0, 2.0],\n...                       [10.0, 11.0, 12.0],\n...                       [20.0, 21.0, 22.0],\n...                       [30.0, 31.0, 32.0]])\n>>> tf.gather(params, indices=[3,1]).numpy()\narray([[30., 31., 32.],\n       [10., 11., 12.]], dtype=float32)\n>>> tf.gather(params, indices=[2,1], axis=1).numpy()\narray([[ 2.,  1.],\n       [12., 11.],\n       [22., 21.],\n       [32., 31.]], dtype=float32)\n\nMore generally: the output has the same shape as the input, with the\nindexed axis replaced by the shape of the indices.\n\n>>> def result_shape(p_shape, i_shape, axis=0):\n...   return p_shape[:axis] + i_shape + p_shape[axis+1:]\n>>>\n>>> result_shape([1, 2, 3], [], axis=1)\n[1, 3]\n>>> result_shape([1, 2, 3], [7], axis=1)\n[1, 7, 3]\n>>> result_shape([1, 2, 3], [7, 5], axis=1)\n[1, 7, 5, 3]\n\nHere are some examples:\n\n>>> params.shape.as_list()\n[4, 3]\n>>> indices = tf.constant([[0, 2]])\n>>> tf.gather(params, indices=indices, axis=0).shape.as_list()\n[1, 2, 3]\n>>> tf.gather(params, indices=indices, axis=1).shape.as_list()\n[4, 1, 2]\n\n>>> params = tf.random.normal(shape=(5, 6, 7, 8))\n>>> indices = tf.random.uniform(shape=(10, 11), maxval=7, dtype=tf.int32)\n>>> result = tf.gather(params, indices, axis=2)\n>>> result.shape.as_list()\n[5, 6, 10, 11, 8]\n\nThis is because each index takes a slice from `params`, and\nplaces it at the corresponding location in the output. For the above example:\n\n>>> # For any location in indices\n>>> a, b = 0, 1\n>>> tf.reduce_all(\n...     # the corresponding slice of the result\n...     result[:, :, a, b, :] ==\n...     # is equal to the slice of `params` along `axis` at the index.\n...     params[:, :, indices[a, b], :]\n... ).numpy()\nTrue\n\n### Batching:\n\nThe `batch_dims` argument lets you gather different items from each element\nof a batch.\n\nUsing `batch_dims=1` is equivalent to having an outer loop over the first\naxis of `params` and `indices`:\n\n>>> params = tf.constant([\n...     [0, 0, 1, 0, 2],\n...     [3, 0, 0, 0, 4],\n...     [0, 5, 0, 6, 0]])\n>>> indices = tf.constant([\n...     [2, 4],\n...     [0, 4],\n...     [1, 3]])\n\n>>> tf.gather(params, indices, axis=1, batch_dims=1).numpy()\narray([[1, 2],\n       [3, 4],\n       [5, 6]], dtype=int32)\n\nThis is equivalent to:\n\n>>> def manually_batched_gather(params, indices, axis):\n...   batch_dims=1\n...   result = []\n...   for p, i in zip(params, indices):\n...     r = tf.gather(p, i, axis=axis-batch_dims)\n...     result.append(r)\n...   return tf.stack(result)\n>>> manually_batched_gather(params, indices, axis=1).numpy()\narray([[1, 2],\n       [3, 4],\n       [5, 6]], dtype=int32)\n\nHigher values of `batch_dims` are equivalent to multiple nested loops over\nthe outer axes of `params` and `indices`. So the overall shape function is:\n\n>>> def batched_result_shape(p_shape, i_shape, axis=0, batch_dims=0):\n...   return p_shape[:axis] + i_shape[batch_dims:] + p_shape[axis+1:]\n>>>\n>>> batched_result_shape(\n...     p_shape=params.shape.as_list(),\n...     i_shape=indices.shape.as_list(),\n...     axis=1,\n...     
batch_dims=1)\n[3, 2]\n\n>>> tf.gather(params, indices, axis=1, batch_dims=1).shape.as_list()\n[3, 2]\n\nThis comes up naturally if you need to use the indices of an operation like\n`tf.argsort`, or `tf.math.top_k` where the last dimension of the indices\nindexes into the last dimension of input, at the corresponding location.\nIn this case you can use `tf.gather(values, indices, batch_dims=-1)`.\n\nSee also:\n\n* `tf.Tensor.__getitem__`: The direct tensor index operation (`t[]`), handles\n scalars and python-slices `tensor[..., 7, 1:-1]`\n* `tf.scatter`: A collection of operations similar to `__setitem__`\n (`t[i] = x`)\n* `tf.gather_nd`: An operation similar to `tf.gather` but gathers across\n multiple axis at once (it can gather elements of a matrix instead of rows\n or columns)\n* `tf.boolean_mask`, `tf.where`: Binary indexing.\n* `tf.slice` and `tf.strided_slice`: For lower level access to the\n implementation of `__getitem__`'s python-slice handling (`t[1:-1:2]`)\n\nArgs:\n params: The `Tensor` from which to gather values. Must be at least rank\n `axis + 1`.\n indices: The index `Tensor`. Must be one of the following types: `int32`,\n `int64`. The values must be in range `[0, params.shape[axis])`.\n validate_indices: Deprecated, does nothing. Indices are always validated on\n CPU, never validated on GPU.\n\n Caution: On CPU, if an out of bound index is found, an error is raised.\n On GPU, if an out of bound index is found, a 0 is stored in the\n corresponding output value.\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The\n `axis` in `params` to gather `indices` from. Must be greater than or equal\n to `batch_dims`. Defaults to the first non-batch dimension. Supports\n negative indexes.\n batch_dims: An `integer`. The number of batch dimensions. Must be less\n than or equal to `rank(indices)`.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor`. Has the same type as `params`."], ["tf.gather_nd", "description: Gather slices from params into a Tensor with shape specified by indices.", false, "Gather slices from `params` into a Tensor with shape specified by `indices`.\n\n `indices` is a `Tensor` of indices into `params`. The index vectors are\n arranged along the last axis of `indices`.\n\n This is similar to `tf.gather`, in which `indices` defines slices into the\n first dimension of `params`. In `tf.gather_nd`, `indices` defines slices into the\n first `N` dimensions of `params`, where `N = indices.shape[-1]`.\n\n Caution: On CPU, if an out of bound index is found, an error is returned.\n On GPU, if an out of bound index is found, a 0 is stored in the\n corresponding output value.\n\n ## Gathering scalars\n\n In the simplest case the vectors in `indices` index the full rank of `params`:\n\n >>> tf.gather_nd(\n ... indices=[[0, 0],\n ... [1, 1]],\n ... params = [['a', 'b'],\n ... ['c', 'd']]).numpy()\n array([b'a', b'd'], dtype=object)\n\n In this case the result has 1-axis fewer than `indices`, and each index vector\n is replaced by the scalar indexed from `params`.\n\n In this case the shape relationship is:\n\n ```\n index_depth = indices.shape[-1]\n assert index_depth == params.shape.rank\n result_shape = indices.shape[:-1]\n ```\n\n If `indices` has a rank of `K`, it is helpful to think `indices` as a\n (K-1)-dimensional tensor of indices into `params`.\n\n ## Gathering slices\n\n If the index vectors do not index the full rank of `params` then each location\n in the result contains a slice of params. 
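Before turning to slices, here is a compact runnable check of the scalar-gather shape rule from the previous section; a sketch using only the ops already introduced:

```python
import tensorflow as tf

params = tf.constant([['a', 'b'], ['c', 'd']])
indices = tf.constant([[0, 0], [1, 1]])

result = tf.gather_nd(params, indices)
# index_depth equals the rank of params, so each index vector picks a scalar.
assert indices.shape[-1] == params.shape.rank
# result_shape = indices.shape[:-1]
assert result.shape == indices.shape[:-1]
print(result.numpy())  # [b'a' b'd']
```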
This example collects rows from a\n matrix:\n\n >>> tf.gather_nd(\n ... indices = [[1],\n ... [0]],\n ... params = [['a', 'b', 'c'],\n ... ['d', 'e', 'f']]).numpy()\n array([[b'd', b'e', b'f'],\n [b'a', b'b', b'c']], dtype=object)\n\n Here `indices` contains two index vectors, each with a length of `1`.\n The index vectors each refer to rows of the `params` matrix. Each\n row has a shape of `[3]` so the output shape is `[2, 3]`.\n\n In this case, the relationship between the shapes is:\n\n ```\n index_depth = indices.shape[-1]\n outer_shape = indices.shape[:-1]\n assert index_depth <= params.shape.rank\n inner_shape = params.shape[index_depth:]\n output_shape = outer_shape + inner_shape\n ```\n\n It is helpful to think of the results in this case as tensors-of-tensors.\n The shape of the outer tensor is set by the leading dimensions of `indices`,\n while the shape of the inner tensors is the shape of a single slice.\n\n ## Batches\n\n Additionally both `params` and `indices` can have `M` leading batch\n dimensions that exactly match. In this case `batch_dims` must be set to `M`.\n\n For example, to collect one row from each of a batch of matrices you could\n set the leading elements of the index vectors to be their location in the\n batch:\n\n >>> tf.gather_nd(\n ... indices = [[0, 1],\n ... [1, 0],\n ... [2, 4],\n ... [3, 2],\n ... [4, 1]],\n ... params=tf.zeros([5, 7, 3])).shape.as_list()\n [5, 3]\n\n The `batch_dims` argument lets you omit those leading location dimensions\n from the index:\n\n >>> tf.gather_nd(\n ... batch_dims=1,\n ... indices = [[1],\n ... [0],\n ... [4],\n ... [2],\n ... [1]],\n ... params=tf.zeros([5, 7, 3])).shape.as_list()\n [5, 3]\n\n This is equivalent to calling a separate `gather_nd` for each location in the\n batch dimensions.\n\n\n >>> params=tf.zeros([5, 7, 3])\n >>> indices=tf.zeros([5, 1])\n >>> batch_dims = 1\n >>>\n >>> index_depth = indices.shape[-1]\n >>> batch_shape = indices.shape[:batch_dims]\n >>> assert params.shape[:batch_dims] == batch_shape\n >>> outer_shape = indices.shape[batch_dims:-1]\n >>> assert index_depth <= params.shape.rank\n >>> inner_shape = params.shape[batch_dims + index_depth:]\n >>> output_shape = batch_shape + outer_shape + inner_shape\n >>> output_shape.as_list()\n [5, 3]\n\n ### More examples\n\n Indexing into a 3-tensor:\n\n >>> tf.gather_nd(\n ... indices = [[1]],\n ... params = [[['a0', 'b0'], ['c0', 'd0']],\n ... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\n array([[[b'a1', b'b1'],\n [b'c1', b'd1']]], dtype=object)\n\n\n\n >>> tf.gather_nd(\n ... indices = [[0, 1], [1, 0]],\n ... params = [[['a0', 'b0'], ['c0', 'd0']],\n ... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\n array([[b'c0', b'd0'],\n [b'a1', b'b1']], dtype=object)\n\n\n >>> tf.gather_nd(\n ... indices = [[0, 0, 1], [1, 0, 1]],\n ... params = [[['a0', 'b0'], ['c0', 'd0']],\n ... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\n array([b'b0', b'b1'], dtype=object)\n\n The examples below are for the case when only indices have leading extra\n dimensions. If both `params` and `indices` have leading batch dimensions, use\n the `batch_dims` parameter to run `gather_nd` in batch mode.\n\n Batched indexing into a matrix:\n\n >>> tf.gather_nd(\n ... indices = [[[0, 0]], [[0, 1]]],\n ... params = [['a', 'b'], ['c', 'd']]).numpy()\n array([[b'a'],\n [b'b']], dtype=object)\n\n\n\n Batched slice indexing into a matrix:\n\n >>> tf.gather_nd(\n ... indices = [[[1]], [[0]]],\n ... 
params = [['a', 'b'], ['c', 'd']]).numpy()\n array([[[b'c', b'd']],\n [[b'a', b'b']]], dtype=object)\n\n\n Batched indexing into a 3-tensor:\n\n >>> tf.gather_nd(\n ... indices = [[[1]], [[0]]],\n ... params = [[['a0', 'b0'], ['c0', 'd0']],\n ... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\n array([[[[b'a1', b'b1'],\n [b'c1', b'd1']]],\n [[[b'a0', b'b0'],\n [b'c0', b'd0']]]], dtype=object)\n\n\n >>> tf.gather_nd(\n ... indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]],\n ... params = [[['a0', 'b0'], ['c0', 'd0']],\n ... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\n array([[[b'c0', b'd0'],\n [b'a1', b'b1']],\n [[b'a0', b'b0'],\n [b'c1', b'd1']]], dtype=object)\n\n >>> tf.gather_nd(\n ... indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]],\n ... params = [[['a0', 'b0'], ['c0', 'd0']],\n ... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\n array([[b'b0', b'b1'],\n [b'd0', b'c1']], dtype=object)\n\n\n Examples with batched `params` and `indices`:\n\n >>> tf.gather_nd(\n ... batch_dims = 1,\n ... indices = [[1],\n ... [0]],\n ... params = [[['a0', 'b0'],\n ... ['c0', 'd0']],\n ... [['a1', 'b1'],\n ... ['c1', 'd1']]]).numpy()\n array([[b'c0', b'd0'],\n [b'a1', b'b1']], dtype=object)\n\n\n >>> tf.gather_nd(\n ... batch_dims = 1,\n ... indices = [[[1]], [[0]]],\n ... params = [[['a0', 'b0'], ['c0', 'd0']],\n ... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\n array([[[b'c0', b'd0']],\n [[b'a1', b'b1']]], dtype=object)\n\n >>> tf.gather_nd(\n ... batch_dims = 1,\n ... indices = [[[1, 0]], [[0, 1]]],\n ... params = [[['a0', 'b0'], ['c0', 'd0']],\n ... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\n array([[b'c0'],\n [b'b1']], dtype=object)\n\n\n See also `tf.gather`.\n\n Args:\n params: A `Tensor`. The tensor from which to gather values.\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Index tensor.\n name: A name for the operation (optional).\n batch_dims: An integer or a scalar `Tensor`. The number of batch dimensions.\n\n Returns:\n A `Tensor`. Has the same type as `params`.\n "]
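As a closing illustration (not part of the original examples), batch mode is equivalent to looping an unbatched `gather_nd` over the leading axis; a minimal sketch:

```python
import tensorflow as tf

params = tf.constant([[['a0', 'b0'], ['c0', 'd0']],
                      [['a1', 'b1'], ['c1', 'd1']]])
indices = tf.constant([[1], [0]])

batched = tf.gather_nd(params, indices, batch_dims=1)
# Equivalent: one unbatched gather_nd per element of the leading axis.
looped = tf.stack([tf.gather_nd(p, i) for p, i in zip(params, indices)])
assert tf.reduce_all(batched == looped)
```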
, ["tf.get_current_name_scope", "description: Returns current full name scope specified by tf.name_scope(...)s.", false, "Returns current full name scope specified by `tf.name_scope(...)`s.\n\n For example,\n ```python\n with tf.name_scope(\"outer\"):\n tf.get_current_name_scope() # \"outer\"\n\n with tf.name_scope(\"inner\"):\n tf.get_current_name_scope() # \"outer/inner\"\n ```\n\n In other words, `tf.get_current_name_scope()` returns the op name prefix that\n will be prepended to the name of any op created at that point.\n\n Note that `@tf.function` resets the name scope stack as shown below.\n\n ```\n with tf.name_scope(\"outer\"):\n\n @tf.function\n def foo(x):\n with tf.name_scope(\"inner\"):\n return tf.add(x, x) # Op name is \"inner/Add\", not \"outer/inner/Add\"\n ```\n "], ["tf.get_logger", "description: Return TF logger instance.", false, "Return TF logger instance."], ["tf.get_static_value", "description: Returns the constant value of the given tensor, if efficiently calculable.", false, "Returns the constant value of the given tensor, if efficiently calculable.\n\n This function attempts to partially evaluate the given tensor, and\n returns its value as a numpy ndarray if this succeeds.\n\n Example usage:\n\n >>> a = tf.constant(10)\n >>> tf.get_static_value(a)\n 10\n >>> b = tf.constant(20)\n >>> tf.get_static_value(tf.add(a, b))\n 30\n\n >>> # `tf.Variable` is not supported.\n >>> c = tf.Variable(30)\n >>> print(tf.get_static_value(c))\n None\n\n The `partial` option is most relevant when calling `get_static_value` inside\n a `tf.function`. Setting it to `True` returns the results, with `None` for\n any values that cannot be evaluated. For example:\n\n ```python\n class Foo(object):\n def __init__(self):\n self.a = tf.Variable(1)\n self.b = tf.constant(2)\n\n @tf.function\n def bar(self, partial):\n packed = tf.raw_ops.Pack(values=[self.a, self.b])\n static_val = tf.get_static_value(packed, partial=partial)\n tf.print(static_val)\n\n f = Foo()\n f.bar(partial=True) # `array([None, array(2, dtype=int32)], dtype=object)`\n f.bar(partial=False) # `None`\n ```\n\n Compatibility (V1): If `constant_value(tensor)` returns a non-`None` result, it\n will no longer be possible to feed a different value for `tensor`. This allows\n the result of this function to influence the graph that is constructed, and\n permits static shape optimizations.\n\n Args:\n tensor: The Tensor to be evaluated.\n partial: If True, the returned numpy array is allowed to have partially\n evaluated values. Values that can't be evaluated will be None.\n\n Returns:\n A numpy ndarray containing the constant value of the given `tensor`,\n or None if it cannot be calculated.\n\n Raises:\n TypeError: if tensor is not an ops.Tensor.\n "]
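A common use of `tf.get_static_value` is folding statically-known values into a computation at trace time. A minimal sketch (the `scale` helper here is hypothetical, for illustration only):

```python
import tensorflow as tf

def scale(x, factor):
    # Hypothetical helper: fold `factor` into the graph when it is
    # statically known, otherwise fall back to a runtime multiply.
    static_factor = tf.get_static_value(factor)
    if static_factor is not None:
        return x * float(static_factor)
    return x * factor

print(scale(tf.constant([1.0, 2.0]), tf.constant(3.0)))
# tf.Tensor([3. 6.], shape=(2,), dtype=float32)
```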
, ["tf.gradients", "description: Constructs symbolic derivatives of sum of ys w.r.t. x in xs.", false, "Constructs symbolic derivatives of sum of `ys` w.r.t. x in `xs`.\n\n `tf.gradients` is only valid in a graph context. In particular,\n it is valid in the context of a `tf.function` wrapper, where code\n is executing as a graph.\n\n `ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys`\n is a list of `Tensor`, holding the gradients received by the\n `ys`. The list must be the same length as `ys`.\n\n `gradients()` adds ops to the graph to output the derivatives of `ys` with\n respect to `xs`. It returns a list of `Tensor` of length `len(xs)` where\n each tensor is the `sum(dy/dx)` for y in `ys` and for x in `xs`.\n\n `grad_ys` is a list of tensors of the same length as `ys` that holds\n the initial gradients for each y in `ys`. When `grad_ys` is None,\n we fill in a tensor of '1's of the shape of y for each y in `ys`. A\n user can provide their own initial `grad_ys` to compute the\n derivatives using a different initial gradient for each y (e.g., if\n one wanted to weight the gradient differently for each value in\n each y).\n\n `stop_gradients` is a `Tensor` or a list of tensors to be considered constant\n with respect to all `xs`. These tensors will not be backpropagated through,\n as though they had been explicitly disconnected using `stop_gradient`. Among\n other things, this allows computation of partial derivatives as opposed to\n total derivatives. For example:\n\n >>> @tf.function\n ... def example():\n ... a = tf.constant(0.)\n ... b = 2 * a\n ... return tf.gradients(a + b, [a, b], stop_gradients=[a, b])\n >>> example()\n [<tf.Tensor: shape=(), dtype=float32, numpy=1.0>,\n <tf.Tensor: shape=(), dtype=float32, numpy=1.0>]\n\n Here the partial derivatives `g` evaluate to `[1.0, 1.0]`, compared to the\n total derivatives `tf.gradients(a + b, [a, b])`, which take into account the\n influence of `a` on `b` and evaluate to `[3.0, 1.0]`. Note that the above is\n equivalent to:\n\n >>> @tf.function\n ... def example():\n ... a = tf.stop_gradient(tf.constant(0.))\n ... b = tf.stop_gradient(2 * a)\n ... return tf.gradients(a + b, [a, b])\n >>> example()\n [<tf.Tensor: shape=(), dtype=float32, numpy=1.0>,\n <tf.Tensor: shape=(), dtype=float32, numpy=1.0>]\n\n `stop_gradients` provides a way of stopping gradient after the graph has\n already been constructed, as compared to `tf.stop_gradient` which is used\n during graph construction. When the two approaches are combined,\n backpropagation stops at both `tf.stop_gradient` nodes and nodes in\n `stop_gradients`, whichever is encountered first.\n\n All integer tensors are considered constant with respect to all `xs`, as if\n they were included in `stop_gradients`.\n\n `unconnected_gradients` determines the value returned for each x in xs if it\n is unconnected in the graph to ys. By default this is None to safeguard\n against errors. Mathematically these gradients are zero which can be requested\n using the `'zero'` option. `tf.UnconnectedGradients` provides the\n following options and behaviors:\n\n >>> @tf.function\n ... def example(use_zero):\n ... a = tf.ones([1, 2])\n ... b = tf.ones([3, 1])\n ... if use_zero:\n ... return tf.gradients([b], [a], unconnected_gradients='zero')\n ... else:\n ... return tf.gradients([b], [a], unconnected_gradients='none')\n >>> example(False)\n [None]\n >>> example(True)\n [<tf.Tensor: shape=(1, 2), dtype=float32, numpy=array([[0., 0.]], dtype=float32)>]\n\n Let us take one practical example which comes up during the backpropagation\n phase. This function is used to evaluate the derivatives of the cost function\n with respect to Weights `Ws` and Biases `bs`. The sample implementation below\n explains what it is actually used for:\n\n >>> @tf.function\n ... def example():\n ... Ws = tf.constant(0.)\n ... bs = 2 * Ws\n ... cost = Ws + bs # This is just an example. Please ignore the formulas.\n ... g = tf.gradients(cost, [Ws, bs])\n ... dCost_dW, dCost_db = g\n ... return dCost_dW, dCost_db\n >>> example()\n (<tf.Tensor: shape=(), dtype=float32, numpy=3.0>,\n <tf.Tensor: shape=(), dtype=float32, numpy=1.0>)\n
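The `grad_ys` weighting described above has no example in this entry; a minimal sketch with arbitrarily chosen weights:

```python
import tensorflow as tf

@tf.function
def weighted():
    x = tf.constant(2.0)
    y1 = x * x       # dy1/dx = 2x = 4.0
    y2 = 3.0 * x     # dy2/dx = 3.0
    # grad_ys weights each y's incoming gradient:
    # 0.5 * 4.0 + 2.0 * 3.0 = 8.0
    return tf.gradients([y1, y2], [x],
                        grad_ys=[tf.constant(0.5), tf.constant(2.0)])

print(weighted())  # [<tf.Tensor: ... numpy=8.0>]
```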
\n Args:\n ys: A `Tensor` or list of tensors to be differentiated.\n xs: A `Tensor` or list of tensors to be used for differentiation.\n grad_ys: Optional. A `Tensor` or list of tensors the same size as\n `ys` and holding the gradients computed for each y in `ys`.\n name: Optional name to use for grouping all the gradient ops together.\n Defaults to 'gradients'.\n gate_gradients: If True, add a tuple around the gradients returned\n for an operation. This avoids some race conditions.\n aggregation_method: Specifies the method used to combine gradient terms.\n Accepted values are constants defined in the class `AggregationMethod`.\n stop_gradients: Optional. A `Tensor` or list of tensors not to differentiate\n through.\n unconnected_gradients: Optional. Specifies the gradient value returned when\n the given input tensors are unconnected. Accepted values are constants\n defined in the class `tf.UnconnectedGradients` and the default value is\n `none`.\n\n Returns:\n A list of `Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)`\n for y in `ys` and for x in `xs`.\n\n Raises:\n LookupError: if one of the operations between `x` and `y` does not\n have a registered gradient function.\n ValueError: if the arguments are invalid.\n RuntimeError: if called in Eager mode.\n\n "], ["tf.GradientTape", "description: Record operations for automatic differentiation.", false, "Record operations for automatic differentiation.\n\n Operations are recorded if they are executed within this context manager and\n at least one of their inputs is being \"watched\".\n\n Trainable variables (created by `tf.Variable` or `tf.compat.v1.get_variable`,\n where `trainable=True` is default in both cases) are automatically watched.\n Tensors can be manually watched by invoking the `watch` method on this context\n manager.\n\n For example, consider the function `y = x * x`. The gradient at `x = 3.0` can\n be computed as:\n\n >>> x = tf.constant(3.0)\n >>> with tf.GradientTape() as g:\n ... g.watch(x)\n ... y = x * x\n >>> dy_dx = g.gradient(y, x)\n >>> print(dy_dx)\n tf.Tensor(6.0, shape=(), dtype=float32)\n\n GradientTapes can be nested to compute higher-order derivatives. For example,\n\n >>> x = tf.constant(5.0)\n >>> with tf.GradientTape() as g:\n ... g.watch(x)\n ... with tf.GradientTape() as gg:\n ... gg.watch(x)\n ... y = x * x\n ... dy_dx = gg.gradient(y, x) # dy_dx = 2 * x\n >>> d2y_dx2 = g.gradient(dy_dx, x) # d2y_dx2 = 2\n >>> print(dy_dx)\n tf.Tensor(10.0, shape=(), dtype=float32)\n >>> print(d2y_dx2)\n tf.Tensor(2.0, shape=(), dtype=float32)\n\n By default, the resources held by a GradientTape are released as soon as\n GradientTape.gradient() method is called. To compute multiple gradients over\n the same computation, create a persistent gradient tape. This allows multiple\n calls to the gradient() method as resources are released when the tape object\n is garbage collected. For example:\n\n >>> x = tf.constant(3.0)\n >>> with tf.GradientTape(persistent=True) as g:\n ... g.watch(x)\n ... y = x * x\n ... z = y * y\n >>> dz_dx = g.gradient(z, x) # (4*x^3 at x = 3)\n >>> print(dz_dx)\n tf.Tensor(108.0, shape=(), dtype=float32)\n >>> dy_dx = g.gradient(y, x)\n >>> print(dy_dx)\n tf.Tensor(6.0, shape=(), dtype=float32)\n\n By default GradientTape will automatically watch any trainable variables that\n are accessed inside the context. If you want fine grained control over which\n variables are watched you can disable automatic tracking by passing\n `watch_accessed_variables=False` to the tape constructor:\n\n >>> x = tf.Variable(2.0)\n >>> w = tf.Variable(5.0)\n >>> with tf.GradientTape(\n ... watch_accessed_variables=False, persistent=True) as tape:\n ... 
tape.watch(x)\n ... y = x ** 2 # Gradients will be available for `x`.\n ... z = w ** 3 # No gradients will be available as `w` isn't being watched.\n >>> dy_dx = tape.gradient(y, x)\n >>> print(dy_dx)\n tf.Tensor(4.0, shape=(), dtype=float32)\n >>> # No gradients will be available as `w` isn't being watched.\n >>> dz_dw = tape.gradient(z, w)\n >>> print(dz_dw)\n None\n\n Note that when using models you should ensure that your variables exist when\n using `watch_accessed_variables=False`. Otherwise it's quite easy to make your\n first iteration not have any gradients:\n\n ```python\n a = tf.keras.layers.Dense(32)\n b = tf.keras.layers.Dense(32)\n\n with tf.GradientTape(watch_accessed_variables=False) as tape:\n tape.watch(a.variables) # Since `a.build` has not been called at this point\n # `a.variables` will return an empty list and the\n # tape will not be watching anything.\n result = b(a(inputs))\n tape.gradient(result, a.variables) # The result of this computation will be\n # a list of `None`s since a's variables\n # are not being watched.\n ```\n\n Note that only tensors with real or complex dtypes are differentiable.\n "], ["tf.grad_pass_through", "description: Creates a grad-pass-through op with the forward behavior provided in f.", false, "Creates a grad-pass-through op with the forward behavior provided in f.\n\n Use this function to wrap any op, maintaining its behavior in the forward\n pass, but replacing the original op in the backward graph with an identity.\n For example:\n\n ```python\n x = tf.Variable(1.0, name=\"x\")\n z = tf.Variable(3.0, name=\"z\")\n\n with tf.GradientTape() as tape:\n # y will evaluate to 9.0\n y = tf.grad_pass_through(x.assign)(z**2)\n # grads will evaluate to 6.0\n grads = tape.gradient(y, z)\n ```\n\n Another example is a 'differentiable' moving average approximation, where\n gradients are allowed to flow into the last value fed to the moving average,\n but the moving average is still used for the forward pass:\n\n ```python\n x = ... # Some scalar value\n # A moving average object, we don't need to know how this is implemented\n moving_average = MovingAverage()\n with backprop.GradientTape() as tape:\n # mavg_x will evaluate to the current running average value\n mavg_x = tf.grad_pass_through(moving_average)(x)\n grads = tape.gradient(mavg_x, x) # grads will evaluate to 1.0\n ```\n\n Args:\n f: function `f(*x)` that returns a `Tensor` or nested structure of `Tensor`\n outputs.\n\n Returns:\n A function `h(x)` which returns the same values as `f(x)` and whose\n gradients are the same as those of an identity function.\n "], ["tf.Graph", "description: A TensorFlow computation, represented as a dataflow graph.", false, "A TensorFlow computation, represented as a dataflow graph.\n\n Graphs are used by `tf.function`s to represent the function's computations.\n Each graph contains a set of `tf.Operation` objects, which represent units of\n computation; and `tf.Tensor` objects, which represent the units of data that\n flow between operations.\n\n ### Using graphs directly (deprecated)\n\n A `tf.Graph` can be constructed and used directly without a `tf.function`, as\n was required in TensorFlow 1, but this is deprecated and it is recommended to\n use a `tf.function` instead. If a graph is directly used, other deprecated\n TensorFlow 1 classes are also required to execute the graph, such as a\n `tf.compat.v1.Session`.\n\n A default graph can be registered with the `tf.Graph.as_default` context\n manager. 
Then, operations will be added to the graph instead of being executed\n eagerly. For example:\n\n ```python\n g = tf.Graph()\n with g.as_default():\n # Define operations and tensors in `g`.\n c = tf.constant(30.0)\n assert c.graph is g\n ```\n\n `tf.compat.v1.get_default_graph()` can be used to obtain the default graph.\n\n Important note: This class *is not* thread-safe for graph construction. All\n operations should be created from a single thread, or external\n synchronization must be provided. Unless otherwise specified, all methods\n are not thread-safe.\n\n A `Graph` instance supports an arbitrary number of \"collections\"\n that are identified by name. For convenience when building a large\n graph, collections can store groups of related objects: for\n example, the `tf.Variable` uses a collection (named\n `tf.GraphKeys.GLOBAL_VARIABLES`) for\n all variables that are created during the construction of a graph. The caller\n may define additional collections by specifying a new name.\n "], ["tf.graph_util", "description: Helpers to manipulate a tensor graph in python.", true, "Helpers to manipulate a tensor graph in python.\n\n"], ["tf.group", "description: Create an op that groups multiple operations.", false, "Create an op that groups multiple operations.\n\n When this op finishes, all ops in `inputs` have finished. This op has no\n output.\n\n Note: *In TensorFlow 2 with eager and/or Autograph, you should not require\n this method, as ops execute in the expected order thanks to automatic control\n dependencies.* Only use `tf.group` when working with v1\n `tf.Graph` code.\n\n When operating in a v1-style graph context, ops are not executed in the same\n order as specified in the code; TensorFlow will attempt to execute ops in\n parallel or in an order convenient to the result it is computing. `tf.group`\n allows you to request that one or more results finish before execution\n continues.\n\n `tf.group` creates a single op (of type `NoOp`), and then adds appropriate\n control dependencies. Thus, `c = tf.group(a, b)` will compute the same graph\n as this:\n\n with tf.control_dependencies([a, b]):\n c = tf.no_op()\n\n See also `tf.tuple` and\n `tf.control_dependencies`.\n\n Args:\n *inputs: Zero or more tensors to group.\n name: A name for this operation (optional).\n\n Returns:\n An Operation that executes all its inputs.\n\n Raises:\n ValueError: If an unknown keyword argument is provided.\n "], ["tf.guarantee_const", "description: Promise to the TF runtime that the input tensor is a constant. (deprecated)", false, "Promise to the TF runtime that the input tensor is a constant. (deprecated)\n\nDeprecated: THIS FUNCTION IS DEPRECATED. It will be removed in a future version.\nInstructions for updating:\nNot for public use.\n\nThe runtime is then free to make optimizations based on this.\n\nReturns the input tensor without modification.\n\nArgs:\n input: A `Tensor`.\n name: A name for this operation.\n\nReturns:\n A `Tensor`. Has the same dtype as `input`."], ["tf.hessians", "description: Constructs the Hessian of sum of ys with respect to x in xs.", false, "Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.\n\n `hessians()` adds ops to the graph to output the Hessian matrix of `ys`\n with respect to `xs`. 
It returns a list of `Tensor` of length `len(xs)`\n where each tensor is the Hessian of `sum(ys)`.\n\n The Hessian is a matrix of second-order partial derivatives of a scalar\n tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).\n\n Args:\n ys: A `Tensor` or list of tensors to be differentiated.\n xs: A `Tensor` or list of tensors to be used for differentiation.\n gate_gradients: See `gradients()` documentation for details.\n aggregation_method: See `gradients()` documentation for details.\n name: Optional name to use for grouping all the gradient ops together.\n defaults to 'hessians'.\n\n Returns:\n A list of Hessian matrices of `sum(ys)` for each `x` in `xs`.\n\n Raises:\n LookupError: if one of the operations between `xs` and `ys` does not\n have a registered gradient function.\n "], ["tf.histogram_fixed_width", "description: Return histogram of values.", false, "Return histogram of values.\n\n Given the tensor `values`, this operation returns a rank 1 histogram counting\n the number of entries in `values` that fell into every bin. The bins are\n equal width and determined by the arguments `value_range` and `nbins`.\n\n Args:\n values: Numeric `Tensor`.\n value_range: Shape [2] `Tensor` of same `dtype` as `values`.\n values <= value_range[0] will be mapped to hist[0],\n values >= value_range[1] will be mapped to hist[-1].\n nbins: Scalar `int32 Tensor`. Number of histogram bins.\n dtype: dtype for returned histogram.\n name: A name for this operation (defaults to 'histogram_fixed_width').\n\n Returns:\n A 1-D `Tensor` holding histogram of values.\n\n Raises:\n TypeError: If any unsupported dtype is provided.\n tf.errors.InvalidArgumentError: If value_range does not\n satisfy value_range[0] < value_range[1].\n\n Examples:\n\n >>> # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)\n ...\n >>> nbins = 5\n >>> value_range = [0.0, 5.0]\n >>> new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]\n >>> hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)\n >>> hist.numpy()\n array([2, 1, 1, 0, 2], dtype=int32)\n "], ["tf.histogram_fixed_width_bins", "description: Bins the given values for use in a histogram.", false, "Bins the given values for use in a histogram.\n\n Given the tensor `values`, this operation returns a rank 1 `Tensor`\n representing the indices of a histogram into which each element\n of `values` would be binned. The bins are equal width and\n determined by the arguments `value_range` and `nbins`.\n\n Args:\n values: Numeric `Tensor`.\n value_range: Shape [2] `Tensor` of same `dtype` as `values`.\n values <= value_range[0] will be mapped to hist[0],\n values >= value_range[1] will be mapped to hist[-1].\n nbins: Scalar `int32 Tensor`. 
Number of histogram bins.\n dtype: dtype for returned histogram.\n name: A name for this operation (defaults to 'histogram_fixed_width_bins').\n\n Returns:\n A `Tensor` holding the indices of the binned values whose shape matches\n `values`.\n\n Raises:\n TypeError: If any unsupported dtype is provided.\n tf.errors.InvalidArgumentError: If value_range does not\n satisfy value_range[0] < value_range[1].\n\n Examples:\n\n >>> # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)\n ...\n >>> nbins = 5\n >>> value_range = [0.0, 5.0]\n >>> new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]\n >>> indices = tf.histogram_fixed_width_bins(new_values, value_range, nbins=5)\n >>> indices.numpy()\n array([0, 0, 1, 2, 4, 4], dtype=int32)\n "], ["tf.identity", "description: Return a Tensor with the same shape and contents as input.", false, "Return a Tensor with the same shape and contents as input.\n\n The return value is not the same Tensor as the original, but contains the same\n values. This operation is fast when used on the same device.\n\n For example:\n\n >>> a = tf.constant([0.78])\n >>> a_identity = tf.identity(a)\n >>> a.numpy()\n array([0.78], dtype=float32)\n >>> a_identity.numpy()\n array([0.78], dtype=float32)\n\n Calling `tf.identity` on a variable will make a Tensor that represents the\n value of that variable at the time it is called. This is equivalent to calling\n `.read_value()`.\n\n >>> a = tf.Variable(5)\n >>> a_identity = tf.identity(a)\n >>> a.assign_add(1)\n <tf.Variable 'UnreadVariable' shape=() dtype=int32, numpy=6>\n >>> a.numpy()\n 6\n >>> a_identity.numpy()\n 5\n\n Args:\n input: A `Tensor`, a `Variable`, a `CompositeTensor` or anything that can be\n converted to a tensor using `tf.convert_to_tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or CompositeTensor. Has the same type and contents as `input`.\n "], ["tf.identity_n", "description: Returns a list of tensors with the same shapes and contents as the input", false, "Returns a list of tensors with the same shapes and contents as the input\n\n tensors.\n\n This op can be used to override the gradient for complicated functions. For\n example, suppose y = f(x) and we wish to apply a custom function g for backprop\n such that dx = g(dy). In Python,\n\n ```python\n with tf.compat.v1.get_default_graph().gradient_override_map(\n {'IdentityN': 'OverrideGradientWithG'}):\n y, _ = identity_n([f(x), x])\n\n @tf.RegisterGradient('OverrideGradientWithG')\n def ApplyG(op, dy, _):\n return [None, g(dy)] # Do not backprop to f(x).\n ```\n\n Args:\n input: A list of `Tensor` objects.\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects. Has the same type as `input`.\n "], ["tf.image", "description: Image ops.", true, "Image ops.\n\nThe `tf.image` module contains various functions for image\nprocessing and decoding-encoding Ops.\n\nMany of the encoding/decoding functions are also available in the\ncore `tf.io` module.\n\n## Image processing\n\n### Resizing\n\nThe resizing Ops accept input images as tensors of several types. They always\noutput resized images as float32 tensors.\n\nThe convenience function `tf.image.resize` supports both 4-D\nand 3-D tensors as input and output. 4-D tensors are for batches of images,\n3-D tensors for individual images.\n\nResized images will be distorted if their original aspect ratio is not the\nsame as `size`. To avoid distortions, see `tf.image.resize_with_pad`.
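A quick sketch of the difference, using an arbitrary blank image (the sizes here are illustrative only):

```python
import tensorflow as tf

image = tf.zeros([200, 100, 3])  # a tall 200x100 dummy image

stretched = tf.image.resize(image, [128, 128])       # aspect ratio distorted
padded = tf.image.resize_with_pad(image, 128, 128)   # ratio kept, zero-padded
print(stretched.shape, padded.shape)  # (128, 128, 3) (128, 128, 3)
```

The related resizing ops are: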
\n\n* `tf.image.resize`\n* `tf.image.resize_with_pad`\n* `tf.image.resize_with_crop_or_pad`\n\nThe class `tf.image.ResizeMethod` provides various resize methods like\n`bilinear`, `nearest_neighbor`.\n\n### Converting Between Colorspaces\n\nImage ops work either on individual images or on batches of images, depending on\nthe shape of their input Tensor.\n\nIf 3-D, the shape is `[height, width, channels]`, and the Tensor represents one\nimage. If 4-D, the shape is `[batch_size, height, width, channels]`, and the\nTensor represents `batch_size` images.\n\nCurrently, `channels` can usefully be 1, 2, 3, or 4. Single-channel images are\ngrayscale, images with 3 channels are encoded as either RGB or HSV. Images\nwith 2 or 4 channels include an alpha channel, which has to be stripped from the\nimage before passing the image to most image processing functions (and can be\nre-attached later).\n\nInternally, images are either stored as one `float32` per channel per pixel\n(implicitly, values are assumed to lie in `[0,1)`) or one `uint8` per channel\nper pixel (values are assumed to lie in `[0,255]`).\n\nTensorFlow can convert between images in RGB or HSV or YIQ.\n\n* `tf.image.rgb_to_grayscale`, `tf.image.grayscale_to_rgb`\n* `tf.image.rgb_to_hsv`, `tf.image.hsv_to_rgb`\n* `tf.image.rgb_to_yiq`, `tf.image.yiq_to_rgb`\n* `tf.image.rgb_to_yuv`, `tf.image.yuv_to_rgb`\n* `tf.image.image_gradients`\n* `tf.image.convert_image_dtype`\n\n### Image Adjustments\n\nTensorFlow provides functions to adjust images in various ways: brightness,\ncontrast, hue, and saturation. Each adjustment can be done with predefined\nparameters or with random parameters picked from predefined intervals. Random\nadjustments are often useful to expand a training set and reduce overfitting.\n\nIf several adjustments are chained it is advisable to minimize the number of\nredundant conversions by first converting the images to the most natural data\ntype and representation.\n\n* `tf.image.adjust_brightness`\n* `tf.image.adjust_contrast`\n* `tf.image.adjust_gamma`\n* `tf.image.adjust_hue`\n* `tf.image.adjust_jpeg_quality`\n* `tf.image.adjust_saturation`\n* `tf.image.random_brightness`\n* `tf.image.random_contrast`\n* `tf.image.random_hue`\n* `tf.image.random_saturation`\n* `tf.image.per_image_standardization`\n\n### Working with Bounding Boxes\n\n* `tf.image.draw_bounding_boxes`\n* `tf.image.combined_non_max_suppression`\n* `tf.image.generate_bounding_box_proposals`\n* `tf.image.non_max_suppression`\n* `tf.image.non_max_suppression_overlaps`\n* `tf.image.non_max_suppression_padded`\n* `tf.image.non_max_suppression_with_scores`\n* `tf.image.pad_to_bounding_box`\n* `tf.image.sample_distorted_bounding_box`\n\n### Cropping\n\n* `tf.image.central_crop`\n* `tf.image.crop_and_resize`\n* `tf.image.crop_to_bounding_box`\n* `tf.io.decode_and_crop_jpeg`\n* `tf.image.extract_glimpse`\n* `tf.image.random_crop`\n* `tf.image.resize_with_crop_or_pad`\n\n### Flipping, Rotating and Transposing\n\n* `tf.image.flip_left_right`\n* `tf.image.flip_up_down`\n* `tf.image.random_flip_left_right`\n* `tf.image.random_flip_up_down`\n* `tf.image.rot90`\n* `tf.image.transpose`\n\n## Image decoding and encoding\n\nTensorFlow provides Ops to decode and encode JPEG and PNG formats. Encoded\nimages are represented by scalar string Tensors, decoded images by 3-D uint8\ntensors of shape `[height, width, channels]`. 
(PNG also supports uint16.)\n\nNote: `decode_gif` returns a 4-D array `[num_frames, height, width, 3]`\n\nThe encode and decode Ops apply to one image at a time. Their input and output\nare all of variable size. If you need fixed size images, pass the output of\nthe decode Ops to one of the cropping and resizing Ops.\n\n* `tf.io.decode_bmp`\n* `tf.io.decode_gif`\n* `tf.io.decode_image`\n* `tf.io.decode_jpeg`\n* `tf.io.decode_and_crop_jpeg`\n* `tf.io.decode_png`\n* `tf.io.encode_jpeg`\n* `tf.io.encode_png`\n\n\n"], ["tf.IndexedSlices", "description: A sparse representation of a set of tensor slices at given indices.", false, "A sparse representation of a set of tensor slices at given indices.\n\n This class is a simple wrapper for a pair of `Tensor` objects:\n\n * `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`.\n * `indices`: A 1-D integer `Tensor` with shape `[D0]`.\n\n An `IndexedSlices` is typically used to represent a subset of a larger\n tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`.\n The values in `indices` are the indices in the first dimension of\n the slices that have been extracted from the larger tensor.\n\n The dense tensor `dense` represented by an `IndexedSlices` `slices` has\n\n ```python\n dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]\n ```\n\n The `IndexedSlices` class is used principally in the definition of\n gradients for operations that have sparse gradients\n (e.g. `tf.gather`).\n\n >>> v = tf.Variable([[0.,1, 2], [2, 3, 4], [4, 5, 6], [6, 7, 8]])\n >>> with tf.GradientTape() as tape:\n ... r = tf.gather(v, [1,3])\n >>> index_slices = tape.gradient(r,v)\n >>> index_slices\n <...IndexedSlices object ...>\n >>> index_slices.indices.numpy()\n array([1, 3], dtype=int32)\n >>> index_slices.values.numpy()\n array([[1., 1., 1.],\n [1., 1., 1.]], dtype=float32)\n\n Contrast this representation with\n `tf.sparse.SparseTensor`,\n which uses multi-dimensional indices and scalar values.\n "], ["tf.IndexedSlicesSpec", "description: Type specification for a tf.IndexedSlices.", false, "Type specification for a `tf.IndexedSlices`."], ["tf.init_scope", "description: A context manager that lifts ops out of control-flow scopes and function-building graphs.", false, "A context manager that lifts ops out of control-flow scopes and function-building graphs.\n\n There is often a need to lift variable initialization ops out of control-flow\n scopes, function-building graphs, and gradient tapes. Entering an\n `init_scope` is a mechanism for satisfying these desiderata. In particular,\n entering an `init_scope` has three effects:\n\n (1) All control dependencies are cleared the moment the scope is entered;\n this is equivalent to entering the context manager returned from\n `control_dependencies(None)`, which has the side-effect of exiting\n control-flow scopes like `tf.cond` and `tf.while_loop`.\n\n (2) All operations that are created while the scope is active are lifted\n into the lowest context on the `context_stack` that is not building a\n graph function. Here, a context is defined as either a graph or an eager\n context. 
Every context switch, i.e., every installation of a graph as\n the default graph and every switch into eager mode, is logged in a\n thread-local stack called `context_switches`; the log entry for a\n context switch is popped from the stack when the context is exited.\n Entering an `init_scope` is equivalent to crawling up\n `context_switches`, finding the first context that is not building a\n graph function, and entering it. A caveat is that if graph mode is\n enabled but the default graph stack is empty, then entering an\n `init_scope` will simply install a fresh graph as the default one.\n\n (3) The gradient tape is paused while the scope is active.\n\n When eager execution is enabled, code inside an init_scope block runs with\n eager execution enabled even when tracing a `tf.function`. For example:\n\n ```python\n tf.compat.v1.enable_eager_execution()\n\n @tf.function\n def func():\n # A function constructs TensorFlow graphs,\n # it does not execute eagerly.\n assert not tf.executing_eagerly()\n with tf.init_scope():\n # Initialization runs with eager execution enabled\n assert tf.executing_eagerly()\n ```\n\n Raises:\n RuntimeError: if graph state is incompatible with this initialization.\n "], ["tf.inside_function", "description: Indicates whether the caller code is executing inside a tf.function.", false, "Indicates whether the caller code is executing inside a `tf.function`.\n\n Returns:\n Boolean, True if the caller code is executing inside a `tf.function`\n rather than eagerly.\n\n Example:\n\n >>> tf.inside_function()\n False\n >>> @tf.function\n ... def f():\n ... print(tf.inside_function())\n >>> f()\n True\n "], ["tf.io", "description: Public API for tf.io namespace.", true, "Public API for tf.io namespace.\n"], ["tf.is_tensor", "description: Checks whether x is a TF-native type that can be passed to many TF ops.", false, "Checks whether `x` is a TF-native type that can be passed to many TF ops.\n\n Use `is_tensor` to differentiate types that can be ingested by TensorFlow ops\n without any conversion (e.g., `tf.Tensor`, `tf.SparseTensor`, and\n `tf.RaggedTensor`) from types that need to be converted into tensors before\n they are ingested (e.g., numpy `ndarray` and Python scalars).\n\n For example, in the following code block:\n\n ```python\n if not tf.is_tensor(t):\n t = tf.convert_to_tensor(t)\n return t.shape, t.dtype\n ```\n\n we check to make sure that `t` is a tensor (and convert it if not) before\n accessing its `shape` and `dtype`. (But note that not all TensorFlow native\n types have shapes or dtypes; `tf.data.Dataset` is an example of a TensorFlow\n native type that has neither shape nor dtype.)
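A runnable variant of the guard above, as a sketch (the `describe` helper is hypothetical):

```python
import numpy as np
import tensorflow as tf

def describe(t):
    # tf.Tensor, SparseTensor, RaggedTensor pass through; other values
    # (numpy arrays, Python scalars) are converted first.
    if not tf.is_tensor(t):
        t = tf.convert_to_tensor(t)
    return t.shape, t.dtype

# The integer dtype may vary by platform (int64 on most systems).
print(describe(np.arange(6).reshape(2, 3)))
```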
\n\n Args:\n x: A python object to check.\n\n Returns:\n `True` if `x` is a TensorFlow-native type.\n "], ["tf.keras", "description: Implementation of the Keras API, the high-level API of TensorFlow.", true, "Implementation of the Keras API, the high-level API of TensorFlow.\n\nDetailed documentation and user guides are available at\n[keras.io](https://keras.io).\n\n"], ["tf.linalg", "description: Operations for linear algebra.", true, "Operations for linear algebra.\n"], ["tf.linspace", "description: Generates evenly-spaced values in an interval along a given axis.", false, "Generates evenly-spaced values in an interval along a given axis.\n\n A sequence of `num` evenly-spaced values are generated beginning at `start`\n along a given `axis`.\n If `num > 1`, the values in the sequence increase by\n `(stop - start) / (num - 1)`, so that the last one is exactly `stop`.\n If `num <= 0`, `ValueError` is raised.\n\n Matches\n [np.linspace](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html)'s\n behaviour\n except when `num == 0`.\n\n For example:\n\n ```\n tf.linspace(10.0, 12.0, 3, name=\"linspace\") => [ 10.0 11.0 12.0]\n ```\n\n `start` and `stop` can be tensors of arbitrary size:\n\n >>> tf.linspace([0., 5.], [10., 40.], 5, axis=0)\n <tf.Tensor: shape=(5, 2), dtype=float32, numpy=\n array([[ 0. , 5. ],\n [ 2.5 , 13.75],\n [ 5. , 22.5 ],\n [ 7.5 , 31.25],\n [10. , 40. ]], dtype=float32)>\n\n `axis` is where the values will be generated (the dimension in the\n returned tensor which corresponds to the axis will be equal to `num`)\n\n >>> tf.linspace([0., 5.], [10., 40.], 5, axis=-1)\n <tf.Tensor: shape=(2, 5), dtype=float32, numpy=\n array([[ 0. , 2.5 , 5. , 7.5 , 10. ],\n [ 5. , 13.75, 22.5 , 31.25, 40. ]], dtype=float32)>\n\n\n\n Args:\n start: A `Tensor`. Must be one of the following types: `bfloat16`,\n `float32`, `float64`. N-D tensor. First entry in the range.\n stop: A `Tensor`. Must have the same type and shape as `start`. N-D tensor.\n Last entry in the range.\n num: A `Tensor`. Must be one of the following types: `int32`, `int64`. 0-D\n tensor. Number of values to generate.\n name: A name for the operation (optional).\n axis: Axis along which the operation is performed (used only when N-D\n tensors are provided).\n\n Returns:\n A `Tensor`. Has the same type as `start`.\n "], ["tf.lite", "description: Public API for tf.lite namespace.", true, "Public API for tf.lite namespace.\n"], ["tf.load_library", "description: Loads a TensorFlow plugin.", false, "Loads a TensorFlow plugin.\n\n \"library_location\" can be a path to a specific shared object, or a folder.\n If it is a folder, all shared objects that are named \"libtfkernel*\" will be\n loaded. When the library is loaded, kernels registered in the library via the\n `REGISTER_*` macros are made available in the TensorFlow process.\n\n Args:\n library_location: Path to the plugin or the folder of plugins.\n Relative or absolute filesystem path to a dynamic library file or folder.\n\n Returns:\n None\n\n Raises:\n OSError: When the file to be loaded is not found.\n RuntimeError: when unable to load the library.\n "], ["tf.load_op_library", "description: Loads a TensorFlow plugin, containing custom ops and kernels.", false, "Loads a TensorFlow plugin, containing custom ops and kernels.\n\n Pass \"library_filename\" to a platform-specific mechanism for dynamically\n loading a library. The rules for determining the exact location of the\n library are platform-specific and are not documented here. When the\n library is loaded, ops and kernels registered in the library via the\n `REGISTER_*` macros are made available in the TensorFlow process. 
Note\n that ops with the same name as an existing op are rejected and not\n registered with the process.\n\n Args:\n library_filename: Path to the plugin.\n Relative or absolute filesystem path to a dynamic library file.\n\n Returns:\n A python module containing the Python wrappers for Ops defined in\n the plugin.\n\n Raises:\n RuntimeError: when unable to load the library or get the python wrappers.\n "], ["tf.lookup", "description: Public API for tf.lookup namespace.", true, "Public API for tf.lookup namespace.\n"], ["tf.make_ndarray", "description: Create a numpy ndarray from a tensor.", false, "Create a numpy ndarray from a tensor.\n\n Create a numpy ndarray with the same shape and data as the tensor.\n\n For example:\n\n ```python\n # Tensor a has shape (2,3)\n a = tf.constant([[1,2,3],[4,5,6]])\n proto_tensor = tf.make_tensor_proto(a) # convert `tensor a` to a proto tensor\n tf.make_ndarray(proto_tensor) # output: array([[1, 2, 3],\n # [4, 5, 6]], dtype=int32)\n # output has shape (2,3)\n ```\n\n Args:\n tensor: A TensorProto.\n\n Returns:\n A numpy array with the tensor contents.\n\n Raises:\n TypeError: if tensor has unsupported type.\n\n "], ["tf.make_tensor_proto", "description: Create a TensorProto.", false, "Create a TensorProto.\n\n In TensorFlow 2.0, representing tensors as protos should no longer be a\n common workflow. That said, this utility function is still useful for\n generating TF Serving request protos:\n\n ```python\n request = tensorflow_serving.apis.predict_pb2.PredictRequest()\n request.model_spec.name = \"my_model\"\n request.model_spec.signature_name = \"serving_default\"\n request.inputs[\"images\"].CopyFrom(tf.make_tensor_proto(X_new))\n ```\n\n `make_tensor_proto` accepts \"values\" of a python scalar, a python list, a\n numpy ndarray, or a numpy scalar.\n\n If \"values\" is a python scalar or a python list, `make_tensor_proto`\n first converts it to a numpy ndarray. If dtype is None, the\n conversion tries its best to infer the right numpy data\n type. Otherwise, the resulting numpy array has a data type compatible\n with the given dtype.\n\n In either case, the numpy ndarray (whether caller-provided or\n auto-converted) must have a type compatible with dtype.\n\n `make_tensor_proto` then converts the numpy array to a tensor proto.\n\n If \"shape\" is None, the resulting tensor proto represents the numpy\n array precisely.\n\n Otherwise, \"shape\" specifies the tensor's shape and the numpy array\n cannot have more elements than what \"shape\" specifies.\n\n Args:\n values: Values to put in the TensorProto.\n dtype: Optional tensor_pb2 DataType value.\n shape: List of integers representing the dimensions of tensor.\n verify_shape: Boolean that enables verification of a shape of values.\n allow_broadcast: Boolean that enables allowing scalars and 1 length vector\n broadcasting. Cannot be true when verify_shape is true.\n\n Returns:\n A `TensorProto`. Depending on the type, it may contain data in the\n \"tensor_content\" attribute, which is not directly useful to Python programs.\n To access the values you should convert the proto back to a numpy ndarray\n with `tf.make_ndarray(proto)`.\n\n If `values` is a `TensorProto`, it is immediately returned; `dtype` and\n `shape` are ignored.\n\n Raises:\n TypeError: if unsupported types are provided.\n ValueError: if arguments have inappropriate values or if verify_shape is\n True and the shape of values does not match the shape argument.\n\n "]
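For dense values, `tf.make_tensor_proto` and `tf.make_ndarray` round-trip; a minimal sketch:

```python
import numpy as np
import tensorflow as tf

original = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)

proto = tf.make_tensor_proto(original)   # numpy -> TensorProto
restored = tf.make_ndarray(proto)        # TensorProto -> numpy

assert restored.dtype == original.dtype
assert (restored == original).all()
```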
, ["tf.map_fn", "description: Transforms elems by applying fn to each element unstacked on axis 0. (deprecated arguments)", false, "Transforms `elems` by applying `fn` to each element unstacked on axis 0. (deprecated arguments)\n\nDeprecated: SOME ARGUMENTS ARE DEPRECATED: `(dtype)`. They will be removed in a future version.\nInstructions for updating:\nUse fn_output_signature instead\n\nSee also `tf.scan`.\n\n`map_fn` unstacks `elems` on axis 0 to obtain a sequence of elements;\ncalls `fn` to transform each element; and then stacks the transformed\nvalues back together.\n\n#### Mapping functions with single-Tensor inputs and outputs\n\nIf `elems` is a single tensor and `fn`'s signature is `tf.Tensor->tf.Tensor`,\nthen `map_fn(fn, elems)` is equivalent to\n`tf.stack([fn(elem) for elem in tf.unstack(elems)])`. E.g.:\n\n>>> tf.map_fn(fn=lambda t: tf.range(t, t + 3), elems=tf.constant([3, 5, 2]))\n<tf.Tensor: shape=(3, 3), dtype=int32, numpy=\narray([[3, 4, 5],\n [5, 6, 7],\n [2, 3, 4]], dtype=int32)>\n\n`map_fn(fn, elems).shape = [elems.shape[0]] + fn(elems[0]).shape`.\n\n#### Mapping functions with multi-arity inputs and outputs\n\n`map_fn` also supports functions with multi-arity inputs and outputs:\n\n* If `elems` is a tuple (or nested structure) of tensors, then those tensors\n must all have the same outer-dimension size (`num_elems`); and `fn` is\n used to transform each tuple (or structure) of corresponding slices from\n `elems`. E.g., if `elems` is a tuple `(t1, t2, t3)`, then `fn` is used to\n transform each tuple of slices `(t1[i], t2[i], t3[i])`\n (where `0 <= i < num_elems`).\n\n* If `fn` returns a tuple (or nested structure) of tensors, then the\n result is formed by stacking corresponding elements from those structures.\n\n#### Specifying `fn`'s output signature\n\nIf `fn`'s input and output signatures are different, then the output\nsignature must be specified using `fn_output_signature`. (The input and\noutput signatures differ if their structures, dtypes, or tensor types do\nnot match.) E.g.:\n\n>>> tf.map_fn(fn=tf.strings.length, # input & output have different dtypes\n... elems=tf.constant([\"hello\", \"moon\"]),\n... fn_output_signature=tf.int32)\n<tf.Tensor: shape=(2,), dtype=int32, numpy=array([5, 4], dtype=int32)>\n\n>>> tf.map_fn(fn=tf.strings.join, # input & output have different structures\n... elems=[tf.constant(['The', 'A']), tf.constant(['Dog', 'Cat'])],\n... fn_output_signature=tf.string)\n<tf.Tensor: shape=(2,), dtype=string, numpy=array([b'TheDog', b'ACat'], dtype=object)>\n\n`fn_output_signature` can be specified using any of the following:\n\n* A `tf.DType` or `tf.TensorSpec` (to describe a `tf.Tensor`)\n* A `tf.RaggedTensorSpec` (to describe a `tf.RaggedTensor`)\n* A `tf.SparseTensorSpec` (to describe a `tf.sparse.SparseTensor`)\n* A (possibly nested) tuple, list, or dict containing the above types.\n
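Putting the two previous points together, a sketch of a multi-arity `fn` whose output structure differs from its input (the values are arbitrary):

```python
import tensorflow as tf

xs = tf.constant([1, 2, 3])
ys = tf.constant([10, 20, 30])

# Input: a tuple of slices (x, y); output: a single tensor, so the
# output signature must be given explicitly.
sums = tf.map_fn(lambda pair: pair[0] + pair[1], (xs, ys),
                 fn_output_signature=tf.int32)
print(sums.numpy())  # [11 22 33]
```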
\n#### RaggedTensors\n\n`map_fn` supports `tf.RaggedTensor` inputs and outputs. In particular:\n\n* If `elems` is a `RaggedTensor`, then `fn` will be called with each\n row of that ragged tensor.\n * If `elems` has only one ragged dimension, then the values passed to\n `fn` will be `tf.Tensor`s.\n * If `elems` has multiple ragged dimensions, then the values passed to\n `fn` will be `tf.RaggedTensor`s with one fewer ragged dimension.\n\n* If the result of `map_fn` should be a `RaggedTensor`, then use a\n `tf.RaggedTensorSpec` to specify `fn_output_signature`.\n * If `fn` returns `tf.Tensor`s with varying sizes, then use a\n `tf.RaggedTensorSpec` with `ragged_rank=0` to combine them into a\n single ragged tensor (which will have ragged_rank=1).\n * If `fn` returns `tf.RaggedTensor`s, then use a `tf.RaggedTensorSpec`\n with the same `ragged_rank`.\n\n>>> # Example: RaggedTensor input\n>>> rt = tf.ragged.constant([[1, 2, 3], [], [4, 5], [6]])\n>>> tf.map_fn(tf.reduce_sum, rt, fn_output_signature=tf.int32)\n<tf.Tensor: shape=(4,), dtype=int32, numpy=array([6, 0, 9, 6], dtype=int32)>\n\n>>> # Example: RaggedTensor output\n>>> elems = tf.constant([3, 5, 0, 2])\n>>> tf.map_fn(tf.range, elems,\n... fn_output_signature=tf.RaggedTensorSpec(shape=[None],\n... dtype=tf.int32))\n<tf.RaggedTensor [[0, 1, 2], [0, 1, 2, 3, 4], [], [0, 1]]>\n\nNote: `map_fn` should only be used if you need to map a function over the\n*rows* of a `RaggedTensor`. If you wish to map a function over the\nindividual values, then you should use:\n\n* `tf.ragged.map_flat_values(fn, rt)`\n (if fn is expressible as TensorFlow ops)\n* `rt.with_flat_values(map_fn(fn, rt.flat_values))`\n (otherwise)\n\nE.g.:\n\n>>> rt = tf.ragged.constant([[1, 2, 3], [], [4, 5], [6]])\n>>> tf.ragged.map_flat_values(lambda x: x + 2, rt)\n<tf.RaggedTensor [[3, 4, 5], [], [6, 7], [8]]>\n\n#### SparseTensors\n\n`map_fn` supports `tf.sparse.SparseTensor` inputs and outputs. In particular:\n\n* If `elems` is a `SparseTensor`, then `fn` will be called with each row\n of that sparse tensor. In particular, the value passed to `fn` will be a\n `tf.sparse.SparseTensor` with one fewer dimension than `elems`.\n\n* If the result of `map_fn` should be a `SparseTensor`, then use a\n `tf.SparseTensorSpec` to specify `fn_output_signature`. The individual\n `SparseTensor`s returned by `fn` will be stacked into a single\n `SparseTensor` with one more dimension.\n\n>>> # Example: SparseTensor input\n>>> st = tf.sparse.SparseTensor([[0, 0], [2, 0], [2, 1]], [2, 3, 4], [4, 4])\n>>> tf.map_fn(tf.sparse.reduce_sum, st, fn_output_signature=tf.int32)\n<tf.Tensor: shape=(4,), dtype=int32, numpy=array([2, 0, 7, 0], dtype=int32)>\n\n>>> # Example: SparseTensor output\n>>> tf.sparse.to_dense(\n... tf.map_fn(tf.sparse.eye, tf.constant([2, 3]),\n... fn_output_signature=tf.SparseTensorSpec(None, tf.float32)))\n<tf.Tensor: shape=(2, 3, 3), dtype=float32, numpy=\narray([[[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 0.]],\n [[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]]], dtype=float32)>\n\nNote: `map_fn` should only be used if you need to map a function over the\n*rows* of a `SparseTensor`. If you wish to map a function over the nonzero\nvalues, then you should use:\n\n* If the function is expressible as TensorFlow ops, use:\n ```python\n tf.sparse.SparseTensor(st.indices, fn(st.values), st.dense_shape)\n ```\n* Otherwise, use:\n ```python\n tf.sparse.SparseTensor(st.indices, tf.map_fn(fn, st.values),\n st.dense_shape)\n ```\n
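A runnable instance of the first recipe, reusing the sparse tensor from the example above (the doubling `fn` is arbitrary):

```python
import tensorflow as tf

st = tf.sparse.SparseTensor([[0, 0], [2, 0], [2, 1]], [2, 3, 4], [4, 4])

# Map over the nonzero values only (fn is expressible as TF ops here).
doubled = tf.sparse.SparseTensor(st.indices, st.values * 2, st.dense_shape)
print(tf.sparse.to_dense(doubled).numpy())
# [[4 0 0 0]
#  [0 0 0 0]
#  [6 8 0 0]
#  [0 0 0 0]]
```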
\n#### `map_fn` vs. vectorized operations\n\n`map_fn` will apply the operations used by `fn` to each element of `elems`,\nresulting in `O(elems.shape[0])` total operations. This is somewhat\nmitigated by the fact that `map_fn` can process elements in parallel.\nHowever, a transform expressed using `map_fn` is still typically less\nefficient than an equivalent transform expressed using vectorized operations.\n\n`map_fn` should typically only be used if one of the following is true:\n\n* It is difficult or expensive to express the desired transform with\n vectorized operations.\n* `fn` creates large intermediate values, so an equivalent vectorized\n transform would take too much memory.\n* Processing elements in parallel is more efficient than an equivalent\n vectorized transform.\n* Efficiency of the transform is not critical, and using `map_fn` is\n more readable.\n\nE.g., the example given above that maps `fn=lambda t: tf.range(t, t + 3)`\nacross `elems` could be rewritten more efficiently using vectorized ops:\n\n>>> elems = tf.constant([3, 5, 2])\n>>> tf.range(3) + tf.expand_dims(elems, 1)\n<tf.Tensor: shape=(3, 3), dtype=int32, numpy=\narray([[3, 4, 5],\n [5, 6, 7],\n [2, 3, 4]], dtype=int32)>\n\nIn some cases, `tf.vectorized_map` can be used to automatically convert a\nfunction to a vectorized equivalent.\n\n#### Eager execution\n\nWhen executing eagerly, `map_fn` does not execute in parallel even if\n`parallel_iterations` is set to a value > 1. You can still get the\nperformance benefits of running a function in parallel by using the\n`tf.function` decorator:\n\n>>> fn=lambda t: tf.range(t, t + 3)\n>>> @tf.function\n... def func(elems):\n... return tf.map_fn(fn, elems, parallel_iterations=3)\n>>> func(tf.constant([3, 5, 2]))\n<tf.Tensor: shape=(3, 3), dtype=int32, numpy=\narray([[3, 4, 5],\n [5, 6, 7],\n [2, 3, 4]], dtype=int32)>\n\n\nNote: if you use the `tf.function` decorator, any non-TensorFlow Python\ncode that you may have written in your function won't get executed. See\n`tf.function` for more details. The recommendation would be to debug without\n`tf.function` but switch to it to get performance benefits of running `map_fn`\nin parallel.\n\nArgs:\n fn: The callable to be performed. It accepts one argument, which will have\n the same (possibly nested) structure as `elems`. Its output must have the\n same structure as `fn_output_signature` if one is provided; otherwise it\n must have the same structure as `elems`.\n elems: A tensor or (possibly nested) sequence of tensors, each of which will\n be unstacked along their first dimension. `fn` will be applied to the\n nested sequence of the resulting slices. `elems` may include ragged and\n sparse tensors. `elems` must consist of at least one tensor.\n dtype: Deprecated: Equivalent to `fn_output_signature`.\n parallel_iterations: (optional) The number of iterations allowed to run in\n parallel. When graph building, the default value is 10. While executing\n eagerly, the default value is set to 1.\n back_prop: (optional) Deprecated: prefer using `tf.stop_gradient` instead.\n False disables support for back propagation.\n swap_memory: (optional) True enables GPU-CPU memory swapping.\n infer_shape: (optional) False disables tests for consistent output shapes.\n name: (optional) Name prefix for the returned tensors.\n fn_output_signature: The output signature of `fn`. Must be specified if\n `fn`'s input and output signatures are different (i.e., if their\n structures, dtypes, or tensor types do not match).\n `fn_output_signature` can be specified using any of the following:\n\n * A `tf.DType` or `tf.TensorSpec` (to describe a `tf.Tensor`)\n * A `tf.RaggedTensorSpec` (to describe a `tf.RaggedTensor`)\n * A `tf.SparseTensorSpec` (to describe a `tf.sparse.SparseTensor`)\n * A (possibly nested) tuple, list, or dict containing the above types.\n\nReturns:\n A tensor or (possibly nested) sequence of tensors. 
Each tensor stacks the\n results of applying `fn` to tensors unstacked from `elems` along the first\n dimension, from first to last. The result may include ragged and sparse\n tensors.\n\nRaises:\n TypeError: if `fn` is not callable or the structure of the output of\n `fn` and `fn_output_signature` do not match.\n ValueError: if the lengths of the output of `fn` and `fn_output_signature`\n do not match, or if the `elems` does not contain any tensor.\n\nExamples:\n\n >>> elems = np.array([1, 2, 3, 4, 5, 6])\n >>> tf.map_fn(lambda x: x * x, elems)\n \n\n >>> elems = (np.array([1, 2, 3]), np.array([-1, 1, -1]))\n >>> tf.map_fn(lambda x: x[0] * x[1], elems, fn_output_signature=tf.int64)\n \n\n >>> elems = np.array([1, 2, 3])\n >>> tf.map_fn(lambda x: (x, -x), elems,\n ... fn_output_signature=(tf.int64, tf.int64))\n (,\n )"], ["tf.math", "description: Math Operations.", true, "Math Operations.\n\nNote: Functions taking `Tensor` arguments can also take anything accepted by\n`tf.convert_to_tensor`.\n\nNote: Elementwise binary operations in TensorFlow follow [numpy-style\nbroadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).\n\nTensorFlow provides a variety of math functions including:\n\n* Basic arithmetic operators and trigonometric functions.\n* Special math functions (like: `tf.math.igamma` and `tf.math.zeta`)\n* Complex number functions (like: `tf.math.imag` and `tf.math.angle`)\n* Reductions and scans (like: `tf.math.reduce_mean` and `tf.math.cumsum`)\n* Segment functions (like: `tf.math.segment_sum`)\n\nSee: `tf.linalg` for matrix and tensor functions.\n\n\n\n## About Segmentation\n\nTensorFlow provides several operations that you can use to perform common\nmath computations on tensor segments.\nHere a segmentation is a partitioning of a tensor along\nthe first dimension, i.e. it defines a mapping from the first dimension onto\n`segment_ids`. 
The `segment_ids` tensor should be the size of\nthe first dimension, `d0`, with consecutive IDs in the range `0` to `k`,\nwhere `k<d0`. In particular, a segmentation of a matrix tensor is a mapping of\nrows to segments.\n\nFor example:\n\n```python\nc = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])\ntf.math.segment_sum(c, tf.constant([0, 0, 1]))\n# ==> [[0 0 0 0]\n# [5 6 7 8]]\n```\n\nThe standard `segment_*` functions assert that the segment indices are sorted.\nIf you have unsorted indices, use the equivalent `unsorted_segment_` function.\nThese functions take an additional argument `num_segments` so that the output\ntensor can be efficiently allocated.\n\n```python\nc = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])\ntf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)\n# ==> [[ 6, 8, 10, 12],\n# [-1, -2, -3, -4]]\n```\n\n\n"], ["tf.meshgrid", "description: Broadcasts parameters for evaluation on an N-D grid.", false, "Broadcasts parameters for evaluation on an N-D grid.\n\n Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`\n of N-D coordinate arrays for evaluating expressions on an N-D grid.\n\n Notes:\n\n `meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.\n When the `indexing` argument is set to 'xy' (the default), the broadcasting\n instructions for the first two dimensions are swapped.\n\n Examples:\n\n Calling `X, Y = meshgrid(x, y)` with the tensors\n\n ```python\n x = [1, 2, 3]\n y = [4, 5, 6]\n X, Y = tf.meshgrid(x, y)\n # X = [[1, 2, 3],\n # [1, 2, 3],\n # [1, 2, 3]]\n # Y = [[4, 4, 4],\n # [5, 5, 5],\n # [6, 6, 6]]\n ```\n\n Args:\n *args: `Tensor`s with rank 1.\n **kwargs:\n - indexing: Either 'xy' or 'ij' (optional, default: 'xy').\n - name: A name for the operation (optional).\n\n Returns:\n outputs: A list of N `Tensor`s with rank N.\n\n Raises:\n TypeError: When no keyword arguments (kwargs) are passed.\n ValueError: When indexing keyword argument is not one of `xy` or `ij`.\n "], ["tf.mlir", "description: Public API for tf.mlir namespace.", true, "Public API for tf.mlir namespace.\n"], ["tf.Module", "description: Base neural network module class.", false, "Base neural network module class.\n\n A module is a named container for `tf.Variable`s, other `tf.Module`s and\n functions which apply to user input. For example, a dense layer in a neural\n network might be implemented as a `tf.Module`:\n\n >>> class Dense(tf.Module):\n ... def __init__(self, input_dim, output_size, name=None):\n ... super(Dense, self).__init__(name=name)\n ... self.w = tf.Variable(\n ... tf.random.normal([input_dim, output_size]), name='w')\n ... self.b = tf.Variable(tf.zeros([output_size]), name='b')\n ... def __call__(self, x):\n ... y = tf.matmul(x, self.w) + self.b\n ... return tf.nn.relu(y)\n\n You can use the Dense layer as you would expect:\n\n >>> d = Dense(input_dim=3, output_size=2)\n >>> d(tf.ones([1, 3]))\n \n\n\n By subclassing `tf.Module` instead of `object`, any `tf.Variable` or\n `tf.Module` instances assigned to object properties can be collected using\n the `variables`, `trainable_variables` or `submodules` property:\n\n >>> d.variables\n (,\n )\n\n\n Subclasses of `tf.Module` can also take advantage of the `_flatten` method\n which can be used to implement tracking of any other types.\n\n All `tf.Module` classes have an associated `tf.name_scope` which can be used\n to group operations in TensorBoard and create hierarchies for variable names\n which can help with debugging. We suggest using the name scope when creating\n nested submodules/parameters or for forward methods whose graph you might want\n to inspect in TensorBoard. 
You can enter the name scope explicitly using\n `with self.name_scope:` or you can annotate methods (apart from `__init__`)\n with `@tf.Module.with_name_scope`.\n\n >>> class MLP(tf.Module):\n ... def __init__(self, input_size, sizes, name=None):\n ... super(MLP, self).__init__(name=name)\n ... self.layers = []\n ... with self.name_scope:\n ... for size in sizes:\n ... self.layers.append(Dense(input_dim=input_size, output_size=size))\n ... input_size = size\n ... @tf.Module.with_name_scope\n ... def __call__(self, x):\n ... for layer in self.layers:\n ... x = layer(x)\n ... return x\n\n >>> module = MLP(input_size=5, sizes=[5, 5])\n >>> module.variables\n (,\n ,\n ,\n )\n "], ["tf.name_scope", "description: A context manager for use when defining a Python op.", false, "A context manager for use when defining a Python op.\n\n This context manager pushes a name scope, which will make the name of all\n operations added within it have a prefix.\n\n For example, to define a new Python op called `my_op`:\n\n ```python\n def my_op(a, b, c, name=None):\n with tf.name_scope(\"MyOp\") as scope:\n a = tf.convert_to_tensor(a, name=\"a\")\n b = tf.convert_to_tensor(b, name=\"b\")\n c = tf.convert_to_tensor(c, name=\"c\")\n # Define some computation that uses `a`, `b`, and `c`.\n return foo_op(..., name=scope)\n ```\n\n When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`,\n and `MyOp/c`.\n\n Inside a `tf.function`, if the scope name already exists, the name will be\n made unique by appending `_n`. For example, calling `my_op` the second time\n will generate `MyOp_1/a`, etc.\n "], ["tf.nest", "description: Functions that work with structures.", true, "Functions that work with structures.\n\nA structure is either:\n\n* one of the recognized Python collections, holding _nested structures_;\n* a value of any other type, typically a TensorFlow data type like Tensor,\n Variable, or of compatible types such as int, float, ndarray, etc. these are\n commonly referred to as _atoms_ of the structure.\n\nA structure of type `T` is a structure whose atomic items are of type `T`.\nFor example, a structure of `tf.Tensor` only contains `tf.Tensor` as its atoms.\n\nHistorically a _nested structure_ was called a _nested sequence_ in TensorFlow.\nA nested structure is sometimes called a _nest_ or a _tree_, but the formal\nname _nested structure_ is preferred.\n\nRefer to [Nesting Data Structures]\n(https://en.wikipedia.org/wiki/Nesting_(computing)#Data_structures).\n\nThe following collection types are recognized by `tf.nest` as nested\nstructures:\n\n* `collections.abc.Sequence` (except `string` and `bytes`).\n This includes `list`, `tuple`, and `namedtuple`.\n* `collections.abc.Mapping` (with sortable keys).\n This includes `dict` and `collections.OrderedDict`.\n* `collections.abc.MappingView` (with sortable keys).\n* [`attr.s` classes](https://www.attrs.org/).\n\nAny other values are considered **atoms**. Not all collection types are\nconsidered nested structures. 
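(As a hedged illustration of the traversal rules above, the sketch below runs `tf.nest.flatten` and `tf.nest.map_structure` on an assumed toy structure; the list of atom types continues right after.)

```python
import tensorflow as tf

# Recognized collections (here a dict and a tuple/list) are traversed;
# the leaves are treated as atoms.
structure = {"a": 1, "b": (2, [3, 4])}
print(tf.nest.flatten(structure))
# [1, 2, 3, 4]
print(tf.nest.map_structure(lambda x: x * 10, structure))
# {'a': 10, 'b': (20, [30, 40])}
```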
For example, the following types are\nconsidered atoms:\n\n* `set`; `{\"a\", \"b\"}` is an atom, while `[\"a\", \"b\"]` is a nested structure.\n* [`dataclass` classes](https://docs.python.org/library/dataclasses.html)\n* `tf.Tensor`\n* `numpy.array`\n\n`tf.nest.is_nested` checks whether an object is a nested structure or an atom.\nFor example:\n\n >>> tf.nest.is_nested(\"1234\")\n False\n >>> tf.nest.is_nested([1, 3, [4, 5]])\n True\n >>> tf.nest.is_nested(((7, 8), (5, 6)))\n True\n >>> tf.nest.is_nested([])\n True\n >>> tf.nest.is_nested({\"a\": 1, \"b\": 2})\n True\n >>> tf.nest.is_nested({\"a\": 1, \"b\": 2}.keys())\n True\n >>> tf.nest.is_nested({\"a\": 1, \"b\": 2}.values())\n True\n >>> tf.nest.is_nested({\"a\": 1, \"b\": 2}.items())\n True\n >>> tf.nest.is_nested(set([1, 2]))\n False\n >>> ones = tf.ones([2, 3])\n >>> tf.nest.is_nested(ones)\n False\n\nNote: A proper structure shall form a tree. The user shall ensure there is no\ncyclic references within the items in the structure,\ni.e., no references in the structure of the input of these functions\nshould be recursive. The behavior is undefined if there is a cycle.\n\n\n"], ["tf.nn", "description: Primitive Neural Net (NN) Operations.", true, "Primitive Neural Net (NN) Operations.\n\n## Notes on padding\n\nSeveral neural network operations, such as `tf.nn.conv2d` and\n`tf.nn.max_pool2d`, take a `padding` parameter, which controls how the input is\npadded before running the operation. The input is padded by inserting values\n(typically zeros) before and after the tensor in each spatial dimension. The\n`padding` parameter can either be the string `'VALID'`, which means use no\npadding, or `'SAME'` which adds padding according to a formula which is\ndescribed below. Certain ops also allow the amount of padding per dimension to\nbe explicitly specified by passing a list to `padding`.\n\nIn the case of convolutions, the input is padded with zeros. In case of pools,\nthe padded input values are ignored. For example, in a max pool, the sliding\nwindow ignores padded values, which is equivalent to the padded values being\n`-infinity`.\n\n### `'VALID'` padding\n\nPassing `padding='VALID'` to an op causes no padding to be used. This causes the\noutput size to typically be smaller than the input size, even when the stride is\none. In the 2D case, the output size is computed as:\n\n```python\nout_height = ceil((in_height - filter_height + 1) / stride_height)\nout_width = ceil((in_width - filter_width + 1) / stride_width)\n```\n\nThe 1D and 3D cases are similar. Note `filter_height` and `filter_width` refer\nto the filter size after dilations (if any) for convolutions, and refer to the\nwindow size for pools.\n\n### `'SAME'` padding\n\nWith `'SAME'` padding, padding is applied to each spatial dimension. When the\nstrides are 1, the input is padded such that the output size is the same as the\ninput size. In the 2D case, the output size is computed as:\n\n```python\nout_height = ceil(in_height / stride_height)\nout_width = ceil(in_width / stride_width)\n```\n\nThe amount of padding used is the smallest amount that results in the output\nsize. 
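(A worked numeric sketch of that claim, using assumed sizes; the general per-dimension formula follows below.)

```python
# 'SAME' padding amounts for an assumed case: in_height=5, filter_height=3,
# stride_height=2.
in_height, filter_height, stride_height = 5, 3, 2
if in_height % stride_height == 0:
    pad_along_height = max(filter_height - stride_height, 0)
else:
    pad_along_height = max(filter_height - (in_height % stride_height), 0)
print(pad_along_height)  # 2, split as pad_top=1, pad_bottom=1
# The output height is then ceil(in_height / stride_height) = 3.
```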
The formula for the total amount of padding per dimension is:\n\n```python\nif (in_height % strides[1] == 0):\n pad_along_height = max(filter_height - stride_height, 0)\nelse:\n pad_along_height = max(filter_height - (in_height % stride_height), 0)\nif (in_width % strides[2] == 0):\n pad_along_width = max(filter_width - stride_width, 0)\nelse:\n pad_along_width = max(filter_width - (in_width % stride_width), 0)\n```\n\nFinally, the padding on the top, bottom, left and right are:\n\n```python\npad_top = pad_along_height // 2\npad_bottom = pad_along_height - pad_top\npad_left = pad_along_width // 2\npad_right = pad_along_width - pad_left\n```\n\nNote that the division by 2 means that there might be cases when the padding on\nboth sides (top vs bottom, right vs left) are off by one. In this case, the\nbottom and right sides always get the one additional padded pixel. For example,\nwhen pad_along_height is 5, we pad 2 pixels at the top and 3 pixels at the\nbottom. Note that this is different from existing libraries such as PyTorch and\nCaffe, which explicitly specify the number of padded pixels and always pad the\nsame number of pixels on both sides.\n\nHere is an example of `'SAME'` padding:\n\n>>> in_height = 5\n>>> filter_height = 3\n>>> stride_height = 2\n>>>\n>>> in_width = 2\n>>> filter_width = 2\n>>> stride_width = 1\n>>>\n>>> inp = tf.ones((2, in_height, in_width, 2))\n>>> filter = tf.ones((filter_height, filter_width, 2, 2))\n>>> strides = [stride_height, stride_width]\n>>> output = tf.nn.conv2d(inp, filter, strides, padding='SAME')\n>>> output.shape[1] # output_height: ceil(5 / 2)\n3\n>>> output.shape[2] # output_width: ceil(2 / 1)\n2\n\n### Explicit padding\n\nCertain ops, like `tf.nn.conv2d`, also allow a list of explicit padding amounts\nto be passed to the `padding` parameter. This list is in the same format as what\nis passed to `tf.pad`, except the padding must be a nested list, not a tensor.\nFor example, in the 2D case, the list is in the format `[[0, 0], [pad_top,\npad_bottom], [pad_left, pad_right], [0, 0]]` when `data_format` is its default\nvalue of `'NHWC'`. The two `[0, 0]` pairs indicate the batch and channel\ndimensions have no padding, which is required, as only spatial dimensions can\nhave padding.\n\nFor example:\n\n>>> inp = tf.ones((1, 3, 3, 1))\n>>> filter = tf.ones((2, 2, 1, 1))\n>>> strides = [1, 1]\n>>> padding = [[0, 0], [1, 2], [0, 1], [0, 0]]\n>>> output = tf.nn.conv2d(inp, filter, strides, padding=padding)\n>>> tuple(output.shape)\n(1, 5, 3, 1)\n>>> # Equivalently, tf.pad can be used, since convolutions pad with zeros.\n>>> inp = tf.pad(inp, padding)\n>>> # 'VALID' means to use no padding in conv2d (we already padded inp)\n>>> output2 = tf.nn.conv2d(inp, filter, strides, padding='VALID')\n>>> tf.debugging.assert_equal(output, output2)\n\n"], ["tf.nondifferentiable_batch_function", "description: Batches the computation done by the decorated function.", false, "Batches the computation done by the decorated function.\n\n So, for example, in the following code\n\n ```python\n @batch_function(1, 2, 3)\n def layer(a):\n return tf.matmul(a, a)\n\n b = layer(w)\n ```\n\n if more than one session.run call is simultaneously trying to compute `b`\n the values of `w` will be gathered, non-deterministically concatenated\n along the first axis, and only one thread will run the computation. 
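(A hedged, TF2-style sketch of the decorator described above; the parameter values are illustrative assumptions, not recommendations.)

```python
import tensorflow as tf

@tf.nondifferentiable_batch_function(num_batch_threads=1,
                                     max_batch_size=8,
                                     batch_timeout_micros=5000)
def layer(a):
  # Inputs from concurrent callers are concatenated along axis 0,
  # computed in one batch, then split back out to each caller.
  return tf.matmul(a, a)
```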
See the\n documentation of the `Batch` op for more details.\n\n Assumes that all arguments of the decorated function are Tensors which will\n be batched along their first dimension.\n\n SparseTensor is not supported. The return value of the decorated function\n must be a Tensor or a list/tuple of Tensors.\n\n Args:\n num_batch_threads: Number of scheduling threads for processing batches\n of work. Determines the number of batches processed in parallel.\n max_batch_size: Batch sizes will never be bigger than this.\n batch_timeout_micros: Maximum number of microseconds to wait before\n outputting an incomplete batch.\n allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,\n does nothing. Otherwise, supplies a list of batch sizes, causing the op\n to pad batches up to one of those sizes. The entries must increase\n monotonically, and the final entry must equal max_batch_size.\n max_enqueued_batches: The maximum depth of the batch queue. Defaults to 10.\n autograph: Whether to use autograph to compile python and eager style code\n for efficient graph-mode execution.\n enable_large_batch_splitting: The value of this option doesn't affect\n processing output given the same input; it affects implementation details\n as stated below: 1. Improve batching efficiency by eliminating unnecessary\n adding. 2.`max_batch_size` specifies the limit of input and\n `allowed_batch_sizes` specifies the limit of a task to be processed. API\n user can give an input of size 128 when 'max_execution_batch_size'\n is 32 -> implementation can split input of 128 into 4 x 32, schedule\n concurrent processing, and then return concatenated results corresponding\n to 128.\n\n Returns:\n The decorated function will return the unbatched computation output Tensors.\n "], ["tf.norm", "description: Computes the norm of vectors, matrices, and tensors.", false, "Computes the norm of vectors, matrices, and tensors.\n\n This function can compute several different vector norms (the 1-norm, the\n Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0) and\n matrix norms (Frobenius, 1-norm, 2-norm and inf-norm).\n\n Args:\n tensor: `Tensor` of types `float32`, `float64`, `complex64`, `complex128`\n ord: Order of the norm. Supported values are `'fro'`, `'euclidean'`,\n `1`, `2`, `np.inf` and any positive real number yielding the corresponding\n p-norm. Default is `'euclidean'` which is equivalent to Frobenius norm if\n `tensor` is a matrix and equivalent to 2-norm for vectors.\n Some restrictions apply:\n a) The Frobenius norm `'fro'` is not defined for vectors,\n b) If axis is a 2-tuple (matrix norm), only `'euclidean'`, '`fro'`, `1`,\n `2`, `np.inf` are supported.\n See the description of `axis` on how to compute norms for a batch of\n vectors or matrices stored in a tensor.\n axis: If `axis` is `None` (the default), the input is considered a vector\n and a single vector norm is computed over the entire set of values in the\n tensor, i.e. `norm(tensor, ord=ord)` is equivalent to\n `norm(reshape(tensor, [-1]), ord=ord)`.\n If `axis` is a Python integer, the input is considered a batch of vectors,\n and `axis` determines the axis in `tensor` over which to compute vector\n norms.\n If `axis` is a 2-tuple of Python integers it is considered a batch of\n matrices and `axis` determines the axes in `tensor` over which to compute\n a matrix norm.\n Negative indices are supported. 
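(An illustrative sketch of these `axis` conventions; the numbers are assumed.)

```python
import tensorflow as tf

x = tf.constant([[3.0, 4.0], [6.0, 8.0]])
print(tf.norm(x).numpy())                 # whole tensor as one vector: sqrt(125) ~= 11.18
print(tf.norm(x, ord=1, axis=1).numpy())  # per-row vector 1-norms: [ 7. 14.]
print(tf.norm(x, axis=[-2, -1]).numpy())  # matrix (Frobenius) norm: ~= 11.18
```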
Example: If you are passing a tensor that\n can be either a matrix or a batch of matrices at runtime, pass\n `axis=[-2,-1]` instead of `axis=None` to make sure that matrix norms are\n computed.\n keepdims: If True, the axes indicated in `axis` are kept with size 1.\n Otherwise, the dimensions in `axis` are removed from the output shape.\n name: The name of the op.\n\n Returns:\n output: A `Tensor` of the same type as tensor, containing the vector or\n matrix norms. If `keepdims` is True then the rank of output is equal to\n the rank of `tensor`. Otherwise, if `axis` is `None` the output is a scalar,\n if `axis` is an integer, the rank of `output` is one less than the rank\n of `tensor`, if `axis` is a 2-tuple the rank of `output` is two less\n than the rank of `tensor`.\n\n Raises:\n ValueError: If `ord` or `axis` is invalid.\n\n @compatibility(numpy)\n Mostly equivalent to numpy.linalg.norm.\n Not supported: ord <= 0, 2-norm for matrices, nuclear norm.\n Other differences:\n a) If axis is `None`, treats the flattened `tensor` as a vector\n regardless of rank.\n b) Explicitly supports 'euclidean' norm as the default, including for\n higher order tensors.\n @end_compatibility\n "], ["tf.no_gradient", "description: Specifies that ops of type op_type are not differentiable.", false, "Specifies that ops of type `op_type` are not differentiable.\n\n This function should *not* be used for operations that have a\n well-defined gradient that is not yet implemented.\n\n This function is only used when defining a new op type. It may be\n used for ops such as `tf.size()` that are not differentiable. For\n example:\n\n ```python\n tf.no_gradient(\"Size\")\n ```\n\n The gradient computed for 'op_type' will then propagate zeros.\n\n For ops that have a well-defined gradient but are not yet implemented,\n no declaration should be made, and an error *must* be thrown if\n an attempt to request its gradient is made.\n\n Args:\n op_type: The string type of an operation. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n\n Raises:\n TypeError: If `op_type` is not a string.\n\n "], ["tf.no_op", "description: Does nothing. Only useful as a placeholder for control edges.", false, "Does nothing. Only useful as a placeholder for control edges.\n\n Args:\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n "], ["tf.numpy_function", "description: Wraps a python function and uses it as a TensorFlow op.", false, "Wraps a python function and uses it as a TensorFlow op.\n\n Given a python function `func`, wrap this function as an operation in a\n TensorFlow function. `func` must take numpy arrays as its arguments and\n return numpy arrays as its outputs.\n\n The following example creates a TensorFlow graph with `np.sinh()` as an\n operation in the graph:\n\n >>> def my_numpy_func(x):\n ... # x will be a numpy array with the contents of the input to the\n ... # tf.function\n ... return np.sinh(x)\n >>> @tf.function(input_signature=[tf.TensorSpec(None, tf.float32)])\n ... def tf_function(input):\n ... y = tf.numpy_function(my_numpy_func, [input], tf.float32)\n ... return y * y\n >>> tf_function(tf.constant(1.))\n \n\n Comparison to `tf.py_function`:\n `tf.py_function` and `tf.numpy_function` are very similar, except that\n `tf.numpy_function` takes numpy arrays, and not `tf.Tensor`s. 
If you want the\n function to contain `tf.Tensor`s, and have any TensorFlow operations executed\n in the function be differentiable, please use `tf.py_function`.\n\n Note: We recommend avoiding `tf.numpy_function` outside of\n prototyping and experimentation due to the following known limitations:\n\n * Calling `tf.numpy_function` will acquire the Python Global Interpreter Lock\n (GIL) that allows only one thread to run at any point in time. This will\n preclude efficient parallelization and distribution of the execution of the\n program. Therefore, you are discouraged from using `tf.numpy_function` outside\n of prototyping and experimentation.\n\n * The body of the function (i.e. `func`) will not be serialized in a\n `tf.SavedModel`. Therefore, you should not use this function if you need to\n serialize your model and restore it in a different environment.\n\n * The operation must run in the same address space as the Python program\n that calls `tf.numpy_function()`. If you are using distributed\n TensorFlow, you must run a `tf.distribute.Server` in the same process as the\n program that calls `tf.numpy_function`, and you must pin the created\n operation to a device in that server (e.g. using `with tf.device():`).\n\n * Currently `tf.numpy_function` is not compatible with XLA. Calling\n `tf.numpy_function` inside `tf.function(jit_compile=True)` will raise an\n error.\n\n * Since the function takes numpy arrays, you cannot take gradients\n through a numpy_function. If you require something that is differentiable,\n please consider using `tf.py_function`.\n\n Args:\n func: A Python function, which accepts `numpy.ndarray` objects as arguments\n and returns a list of `numpy.ndarray` objects (or a single\n `numpy.ndarray`). This function must accept as many arguments as there are\n tensors in `inp`, and these argument types will match the corresponding\n `tf.Tensor` objects in `inp`. The returned `numpy.ndarray`s must match the\n number and types defined by `Tout`.\n Important Note: Input and output `numpy.ndarray`s of `func` are not\n guaranteed to be copies. In some cases their underlying memory will be\n shared with the corresponding TensorFlow tensors. In-place modification\n or storing `func` input or return values in Python data structures\n without an explicit (np.)copy can have non-deterministic consequences.\n inp: A list of `tf.Tensor` objects.\n Tout: A list or tuple of TensorFlow data types or a single TensorFlow data\n type if there is only one, indicating what `func` returns.\n stateful: (Boolean.) Setting this argument to False tells the runtime to\n treat the function as stateless, which enables certain optimizations.\n A function is stateless when given the same input it will return the\n same output and have no side effects; its only purpose is to have a\n return value.\n The behavior for a stateful function with the `stateful` argument False\n is undefined. 
In particular, caution should be taken when\n mutating the input arguments as this is a stateful operation.\n name: (Optional) A name for the operation.\n\n Returns:\n Single or list of `tf.Tensor` which `func` computes.\n "], ["tf.ones", "description: Creates a tensor with all elements set to one (1).", false, "Creates a tensor with all elements set to one (1).\n\n See also `tf.ones_like`, `tf.zeros`, `tf.fill`, `tf.eye`.\n\n This operation returns a tensor of type `dtype` with shape `shape` and\n all elements set to one.\n\n >>> tf.ones([3, 4], tf.int32)\n \n\n Args:\n shape: A `list` of integers, a `tuple` of integers, or\n a 1-D `Tensor` of type `int32`.\n dtype: Optional DType of an element in the resulting `Tensor`. Default is\n `tf.float32`.\n name: Optional string. A name for the operation.\n\n Returns:\n A `Tensor` with all elements set to one (1).\n "], ["tf.ones_initializer", "description: Initializer that generates tensors initialized to 1.", false, "Initializer that generates tensors initialized to 1.\n\n Initializers allow you to pre-specify an initialization strategy, encoded in\n the Initializer object, without knowing the shape and dtype of the variable\n being initialized.\n\n Examples:\n\n >>> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.ones_initializer())\n >>> v1\n \n >>> v2\n \n >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))\n (, >> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])\n >>> tf.ones_like(tensor)\n \n\n Args:\n input: A `Tensor`.\n dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,\n `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,\n `complex64`, `complex128`, `bool` or `string`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to one.\n "], ["tf.one_hot", "description: Returns a one-hot tensor.", false, "Returns a one-hot tensor.\n\n See also `tf.fill`, `tf.eye`.\n\n The locations represented by indices in `indices` take value `on_value`,\n while all other locations take value `off_value`.\n\n `on_value` and `off_value` must have matching data types. If `dtype` is also\n provided, they must be the same data type as specified by `dtype`.\n\n If `on_value` is not provided, it will default to the value `1` with type\n `dtype`\n\n If `off_value` is not provided, it will default to the value `0` with type\n `dtype`\n\n If the input `indices` is rank `N`, the output will have rank `N+1`. The\n new axis is created at dimension `axis` (default: the new axis is appended\n at the end).\n\n If `indices` is a scalar the output shape will be a vector of length `depth`\n\n If `indices` is a vector of length `features`, the output shape will be:\n\n ```\n features x depth if axis == -1\n depth x features if axis == 0\n ```\n\n If `indices` is a matrix (batch) with shape `[batch, features]`, the output\n shape will be:\n\n ```\n batch x features x depth if axis == -1\n batch x depth x features if axis == 1\n depth x batch x features if axis == 0\n ```\n\n If `indices` is a RaggedTensor, the 'axis' argument must be positive and refer\n to a non-ragged axis. 
The output will be equivalent to applying 'one_hot' on\n the values of the RaggedTensor, and creating a new RaggedTensor from the\n result.\n\n If `dtype` is not provided, it will attempt to assume the data type of\n `on_value` or `off_value`, if one or both are passed in. If none of\n `on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the\n value `tf.float32`.\n\n Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,\n etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.\n\n For example:\n\n ```python\n indices = [0, 1, 2]\n depth = 3\n tf.one_hot(indices, depth) # output: [3 x 3]\n # [[1., 0., 0.],\n # [0., 1., 0.],\n # [0., 0., 1.]]\n\n indices = [0, 2, -1, 1]\n depth = 3\n tf.one_hot(indices, depth,\n on_value=5.0, off_value=0.0,\n axis=-1) # output: [4 x 3]\n # [[5.0, 0.0, 0.0], # one_hot(0)\n # [0.0, 0.0, 5.0], # one_hot(2)\n # [0.0, 0.0, 0.0], # one_hot(-1)\n # [0.0, 5.0, 0.0]] # one_hot(1)\n\n indices = [[0, 2], [1, -1]]\n depth = 3\n tf.one_hot(indices, depth,\n on_value=1.0, off_value=0.0,\n axis=-1) # output: [2 x 2 x 3]\n # [[[1.0, 0.0, 0.0], # one_hot(0)\n # [0.0, 0.0, 1.0]], # one_hot(2)\n # [[0.0, 1.0, 0.0], # one_hot(1)\n # [0.0, 0.0, 0.0]]] # one_hot(-1)\n\n indices = tf.ragged.constant([[0, 1], [2]])\n depth = 3\n tf.one_hot(indices, depth) # output: [2 x None x 3]\n # [[[1., 0., 0.],\n # [0., 1., 0.]],\n # [[0., 0., 1.]]]\n ```\n\n Args:\n indices: A `Tensor` of indices.\n depth: A scalar defining the depth of the one hot dimension.\n on_value: A scalar defining the value to fill in output when `indices[j]\n = i`. (default: 1)\n off_value: A scalar defining the value to fill in output when `indices[j]\n != i`. (default: 0)\n axis: The axis to fill (default: -1, a new inner-most axis).\n dtype: The data type of the output tensor.\n name: A name for the operation (optional).\n\n Returns:\n output: The one-hot tensor.\n\n Raises:\n TypeError: If dtype of either `on_value` or `off_value` don't match `dtype`\n TypeError: If dtype of `on_value` and `off_value` don't match one another\n "], ["tf.Operation", "description: Represents a graph node that performs computation on tensors.", false, "Represents a graph node that performs computation on tensors.\n\n An `Operation` is a node in a `tf.Graph` that takes zero or more `Tensor`\n objects as input, and produces zero or more `Tensor` objects as output.\n Objects of type `Operation` are created by calling a Python op constructor\n (such as `tf.matmul`) within a `tf.function` or under a `tf.Graph.as_default`\n context manager.\n\n For example, within a `tf.function`, `c = tf.matmul(a, b)` creates an\n `Operation` of type \"MatMul\" that takes tensors `a` and `b` as input, and\n produces `c` as output.\n\n If a `tf.compat.v1.Session` is used, an `Operation` of a `tf.Graph` can be\n executed by passing it to `tf.Session.run`. `op.run()` is a shortcut for\n calling `tf.compat.v1.get_default_session().run(op)`.\n "], ["tf.OptionalSpec", "description: Type specification for tf.experimental.Optional.", false, "Type specification for `tf.experimental.Optional`.\n\n For instance, `tf.OptionalSpec` can be used to define a tf.function that takes\n `tf.experimental.Optional` as an input argument:\n\n >>> @tf.function(input_signature=[tf.OptionalSpec(\n ... tf.TensorSpec(shape=(), dtype=tf.int32, name=None))])\n ... def maybe_square(optional):\n ... if optional.has_value():\n ... x = optional.get_value()\n ... return x * x\n ... 
return -1\n >>> optional = tf.experimental.Optional.from_value(5)\n >>> print(maybe_square(optional))\n tf.Tensor(25, shape=(), dtype=int32)\n\n Attributes:\n element_spec: A (nested) structure of `TypeSpec` objects that represents the\n type specification of the optional element.\n "], ["tf.pad", "description: Pads a tensor.", false, "Pads a tensor.\n\n This operation pads a `tensor` according to the `paddings` you specify.\n `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of\n `tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how\n many values to add before the contents of `tensor` in that dimension, and\n `paddings[D, 1]` indicates how many values to add after the contents of\n `tensor` in that dimension. If `mode` is \"REFLECT\" then both `paddings[D, 0]`\n and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If\n `mode` is \"SYMMETRIC\" then both `paddings[D, 0]` and `paddings[D, 1]` must be\n no greater than `tensor.dim_size(D)`.\n\n The padded size of each dimension D of the output is:\n\n `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`\n\n For example:\n\n ```python\n t = tf.constant([[1, 2, 3], [4, 5, 6]])\n paddings = tf.constant([[1, 1,], [2, 2]])\n # 'constant_values' is 0.\n # rank of 't' is 2.\n tf.pad(t, paddings, \"CONSTANT\") # [[0, 0, 0, 0, 0, 0, 0],\n # [0, 0, 1, 2, 3, 0, 0],\n # [0, 0, 4, 5, 6, 0, 0],\n # [0, 0, 0, 0, 0, 0, 0]]\n\n tf.pad(t, paddings, \"REFLECT\") # [[6, 5, 4, 5, 6, 5, 4],\n # [3, 2, 1, 2, 3, 2, 1],\n # [6, 5, 4, 5, 6, 5, 4],\n # [3, 2, 1, 2, 3, 2, 1]]\n\n tf.pad(t, paddings, \"SYMMETRIC\") # [[2, 1, 1, 2, 3, 3, 2],\n # [2, 1, 1, 2, 3, 3, 2],\n # [5, 4, 4, 5, 6, 6, 5],\n # [5, 4, 4, 5, 6, 6, 5]]\n ```\n\n Args:\n tensor: A `Tensor`.\n paddings: A `Tensor` of type `int32`.\n mode: One of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\" (case-insensitive)\n constant_values: In \"CONSTANT\" mode, the scalar pad value to use. Must be\n same type as `tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `tensor`.\n\n Raises:\n ValueError: When mode is not one of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\".\n "], ["tf.parallel_stack", "description: Stacks a list of rank-R tensors into one rank-(R+1) tensor in parallel.", false, "Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.\n\n Requires that the shape of inputs be known at graph construction time.\n\n Packs the list of tensors in `values` into a tensor with rank one higher than\n each tensor in `values`, by packing them along the first dimension.\n Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`\n tensor will have the shape `(N, A, B, C)`.\n\n For example:\n\n ```python\n x = tf.constant([1, 4])\n y = tf.constant([2, 5])\n z = tf.constant([3, 6])\n tf.parallel_stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]]\n ```\n\n The difference between `stack` and `parallel_stack` is that `stack` requires\n all the inputs be computed before the operation will begin but doesn't require\n that the input shapes be known during graph construction.\n\n `parallel_stack` will copy pieces of the input into the output as they become\n available, in some situations this can provide a performance benefit.\n\n Unlike `stack`, `parallel_stack` does NOT support backpropagation.\n\n This is the opposite of unstack. 
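(A quick sketch of that inverse relationship, shown with `tf.stack`/`tf.unstack` since `parallel_stack` does not run eagerly; the tensors are assumed.)

```python
import tensorflow as tf

x, y, z = tf.constant([1, 4]), tf.constant([2, 5]), tf.constant([3, 6])
stacked = tf.stack([x, y, z])  # [[1, 4], [2, 5], [3, 6]]
a, b, c = tf.unstack(stacked)  # recovers x, y, z row by row
```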
The numpy equivalent is\n\n tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])\n\n @compatibility(eager)\n parallel_stack is not compatible with eager execution.\n @end_compatibility\n\n Args:\n values: A list of `Tensor` objects with the same shape and type.\n name: A name for this operation (optional).\n\n Returns:\n output: A stacked `Tensor` with the same type as `values`.\n\n Raises:\n RuntimeError: if executed in eager mode.\n "], ["tf.print", "description: Print the specified inputs.", false, "Print the specified inputs.\n\n A TensorFlow operator that prints the specified inputs to a desired\n output stream or logging level. The inputs may be dense or sparse Tensors,\n primitive python objects, data structures that contain tensors, and printable\n Python objects. Printed tensors will recursively show the first and last\n elements of each dimension to summarize.\n\n Example:\n Single-input usage:\n\n ```python\n tensor = tf.range(10)\n tf.print(tensor, output_stream=sys.stderr)\n ```\n\n (This prints \"[0 1 2 ... 7 8 9]\" to sys.stderr)\n\n Multi-input usage:\n\n ```python\n tensor = tf.range(10)\n tf.print(\"tensors:\", tensor, {2: tensor * 2}, output_stream=sys.stdout)\n ```\n\n (This prints \"tensors: [0 1 2 ... 7 8 9] {2: [0 2 4 ... 14 16 18]}\" to\n sys.stdout)\n\n Changing the input separator:\n ```python\n tensor_a = tf.range(2)\n tensor_b = tensor_a * 2\n tf.print(tensor_a, tensor_b, output_stream=sys.stderr, sep=',')\n ```\n\n (This prints \"[0 1],[0 2]\" to sys.stderr)\n\n Usage in a `tf.function`:\n\n ```python\n @tf.function\n def f():\n tensor = tf.range(10)\n tf.print(tensor, output_stream=sys.stderr)\n return tensor\n\n range_tensor = f()\n ```\n\n (This prints \"[0 1 2 ... 7 8 9]\" to sys.stderr)\n\n *Compatibility usage in TF 1.x graphs*:\n\n In graphs manually created outside of `tf.function`, this method returns\n the created TF operator that prints the data. To make sure the\n operator runs, users need to pass the produced op to\n `tf.compat.v1.Session`'s run method, or to use the op as a control\n dependency for executed ops by specifying\n `with tf.compat.v1.control_dependencies([print_op])`.\n\n ```python\n tf.compat.v1.disable_v2_behavior() # for TF1 compatibility only\n\n sess = tf.compat.v1.Session()\n with sess.as_default():\n tensor = tf.range(10)\n print_op = tf.print(\"tensors:\", tensor, {2: tensor * 2},\n output_stream=sys.stdout)\n with tf.control_dependencies([print_op]):\n tripled_tensor = tensor * 3\n\n sess.run(tripled_tensor)\n ```\n\n (This prints \"tensors: [0 1 2 ... 7 8 9] {2: [0 2 4 ... 14 16 18]}\" to\n sys.stdout)\n\n Note: In Jupyter notebooks and colabs, `tf.print` prints to the notebook\n cell outputs. It will not write to the notebook kernel's console logs.\n\n Args:\n *inputs: Positional arguments that are the inputs to print. Inputs in the\n printed output will be separated by spaces. Inputs may be python\n primitives, tensors, data structures such as dicts and lists that may\n contain tensors (with the data structures possibly nested in arbitrary\n ways), and printable python objects.\n output_stream: The output stream, logging level, or file to print to.\n Defaults to sys.stderr, but sys.stdout, tf.compat.v1.logging.info,\n tf.compat.v1.logging.warning, tf.compat.v1.logging.error,\n absl.logging.info, absl.logging.warning and absl.logging.error are also\n supported. 
To print to a file, pass a string started with \"file://\"\n followed by the file path, e.g., \"file:///tmp/foo.out\".\n summarize: The first and last `summarize` elements within each dimension are\n recursively printed per Tensor. If None, then the first 3 and last 3\n elements of each dimension are printed for each tensor. If set to -1, it\n will print all elements of every tensor.\n sep: The string to use to separate the inputs. Defaults to \" \".\n end: End character that is appended at the end the printed string. Defaults\n to the newline character.\n name: A name for the operation (optional).\n\n Returns:\n None when executing eagerly. During graph tracing this returns\n a TF operator that prints the specified inputs in the specified output\n stream or logging level. This operator will be automatically executed\n except inside of `tf.compat.v1` graphs and sessions.\n\n Raises:\n ValueError: If an unsupported output stream is specified.\n "], ["tf.profiler", "description: Public API for tf.profiler namespace.", true, "Public API for tf.profiler namespace.\n"], ["tf.py_function", "description: Wraps a python function into a TensorFlow op that executes it eagerly.", false, "Wraps a python function into a TensorFlow op that executes it eagerly.\n\n This function allows expressing computations in a TensorFlow graph as\n Python functions. In particular, it wraps a Python function `func`\n in a once-differentiable TensorFlow operation that executes it with eager\n execution enabled. As a consequence, `tf.py_function` makes it\n possible to express control flow using Python constructs (`if`, `while`,\n `for`, etc.), instead of TensorFlow control flow constructs (`tf.cond`,\n `tf.while_loop`). For example, you might use `tf.py_function` to\n implement the log huber function:\n\n ```python\n def log_huber(x, m):\n if tf.abs(x) <= m:\n return x**2\n else:\n return m**2 * (1 - 2 * tf.math.log(m) + tf.math.log(x**2))\n\n x = tf.constant(1.0)\n m = tf.constant(2.0)\n\n with tf.GradientTape() as t:\n t.watch([x, m])\n y = tf.py_function(func=log_huber, inp=[x, m], Tout=tf.float32)\n\n dy_dx = t.gradient(y, x)\n assert dy_dx.numpy() == 2.0\n ```\n\n You can also use `tf.py_function` to debug your models at runtime\n using Python tools, i.e., you can isolate portions of your code that\n you want to debug, wrap them in Python functions and insert `pdb` tracepoints\n or print statements as desired, and wrap those functions in\n `tf.py_function`.\n\n For more information on eager execution, see the\n [Eager guide](https://tensorflow.org/guide/eager).\n\n `tf.py_function` is similar in spirit to `tf.compat.v1.py_func`, but unlike\n the latter, the former lets you use TensorFlow operations in the wrapped\n Python function. In particular, while `tf.compat.v1.py_func` only runs on CPUs\n and wraps functions that take NumPy arrays as inputs and return NumPy arrays\n as outputs, `tf.py_function` can be placed on GPUs and wraps functions\n that take Tensors as inputs, execute TensorFlow operations in their bodies,\n and return Tensors as outputs.\n\n Note: We recommend to avoid using `tf.py_function` outside of prototyping\n and experimentation due to the following known limitations:\n\n * Calling `tf.py_function` will acquire the Python Global Interpreter Lock\n (GIL) that allows only one thread to run at any point in time. This will\n preclude efficient parallelization and distribution of the execution of the\n program.\n\n * The body of the function (i.e. `func`) will not be serialized in a\n `GraphDef`. 
Therefore, you should not use this function if you need to\n serialize your model and restore it in a different environment.\n\n * The operation must run in the same address space as the Python program\n that calls `tf.py_function()`. If you are using distributed\n TensorFlow, you must run a `tf.distribute.Server` in the same process as the\n program that calls `tf.py_function()` and you must pin the created\n operation to a device in that server (e.g. using `with tf.device():`).\n\n * Currently `tf.py_function` is not compatible with XLA. Calling\n `tf.py_function` inside `tf.function(jit_compile=True)` will raise an\n error.\n\n Args:\n func: A Python function that accepts `inp` as arguments, and returns a\n value (or list of values) whose type is described by `Tout`.\n\n inp: Input arguments for `func`. A list whose elements are `Tensor`s or\n `CompositeTensors` (such as `tf.RaggedTensor`); or a single `Tensor` or\n `CompositeTensor`.\n\n Tout: The type(s) of the value(s) returned by `func`. One of the\n following:\n\n * If `func` returns a `Tensor` (or a value that can be converted to a\n Tensor): the `tf.DType` for that value.\n * If `func` returns a `CompositeTensor`: The `tf.TypeSpec` for that value.\n * If `func` returns `None`: the empty list (`[]`).\n * If `func` returns a list of `Tensor` and `CompositeTensor` values:\n a corresponding list of `tf.DType`s and `tf.TypeSpec`s for each value.\n\n name: A name for the operation (optional).\n\n Returns:\n The value(s) computed by `func`: a `Tensor`, `CompositeTensor`, or list of\n `Tensor` and `CompositeTensor`; or an empty list if `func` returns `None`.\n "], ["tf.quantization", "description: Public API for tf.quantization namespace.", true, "Public API for tf.quantization namespace.\n"], ["tf.queue", "description: Public API for tf.queue namespace.", true, "Public API for tf.queue namespace.\n"], ["tf.ragged", "description: Ragged Tensors.", true, "Ragged Tensors.\n\nThis package defines ops for manipulating ragged tensors (`tf.RaggedTensor`),\nwhich are tensors with non-uniform shapes. In particular, each `RaggedTensor`\nhas one or more *ragged dimensions*, which are dimensions whose slices may have\ndifferent lengths. For example, the inner (column) dimension of\n`rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged, since the column slices\n(`rt[0, :]`, ..., `rt[4, :]`) have different lengths. 
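(A minimal runnable sketch of that raggedness, using the same nested list as above:)

```python
import tensorflow as tf

rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
print(rt.row_lengths().numpy())  # [4 0 3 1 0] -> the column dimension is ragged
print(rt[0, :].numpy())          # [3 1 4 1]
```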
For a more detailed\ndescription of ragged tensors, see the `tf.RaggedTensor` class documentation\nand the [Ragged Tensor Guide](/guide/ragged_tensor).\n\n\n### Additional ops that support `RaggedTensor`\n\nArguments that accept `RaggedTensor`s are marked in **bold**.\n\n* `tf.__operators__.eq`(**self**, **other**)\n* `tf.__operators__.ne`(**self**, **other**)\n* `tf.bitcast`(**input**, type, name=`None`)\n* `tf.bitwise.bitwise_and`(**x**, **y**, name=`None`)\n* `tf.bitwise.bitwise_or`(**x**, **y**, name=`None`)\n* `tf.bitwise.bitwise_xor`(**x**, **y**, name=`None`)\n* `tf.bitwise.invert`(**x**, name=`None`)\n* `tf.bitwise.left_shift`(**x**, **y**, name=`None`)\n* `tf.bitwise.right_shift`(**x**, **y**, name=`None`)\n* `tf.broadcast_to`(**input**, **shape**, name=`None`)\n* `tf.cast`(**x**, dtype, name=`None`)\n* `tf.clip_by_value`(**t**, clip_value_min, clip_value_max, name=`None`)\n* `tf.concat`(**values**, axis, name=`'concat'`)\n* `tf.debugging.check_numerics`(**tensor**, message, name=`None`)\n* `tf.dtypes.complex`(**real**, **imag**, name=`None`)\n* `tf.dtypes.saturate_cast`(**value**, dtype, name=`None`)\n* `tf.dynamic_partition`(**data**, **partitions**, num_partitions, name=`None`)\n* `tf.expand_dims`(**input**, axis, name=`None`)\n* `tf.gather_nd`(**params**, **indices**, batch_dims=`0`, name=`None`)\n* `tf.gather`(**params**, **indices**, validate_indices=`None`, axis=`None`, batch_dims=`0`, name=`None`)\n* `tf.image.adjust_brightness`(**image**, delta)\n* `tf.image.adjust_gamma`(**image**, gamma=`1`, gain=`1`)\n* `tf.image.convert_image_dtype`(**image**, dtype, saturate=`False`, name=`None`)\n* `tf.image.random_brightness`(**image**, max_delta, seed=`None`)\n* `tf.image.resize`(**images**, size, method=`'bilinear'`, preserve_aspect_ratio=`False`, antialias=`False`, name=`None`)\n* `tf.image.stateless_random_brightness`(**image**, max_delta, seed)\n* `tf.io.decode_base64`(**input**, name=`None`)\n* `tf.io.decode_compressed`(**bytes**, compression_type=`''`, name=`None`)\n* `tf.io.encode_base64`(**input**, pad=`False`, name=`None`)\n* `tf.linalg.matmul`(**a**, **b**, transpose_a=`False`, transpose_b=`False`, adjoint_a=`False`, adjoint_b=`False`, a_is_sparse=`False`, b_is_sparse=`False`, output_type=`None`, name=`None`)\n* `tf.math.abs`(**x**, name=`None`)\n* `tf.math.acos`(**x**, name=`None`)\n* `tf.math.acosh`(**x**, name=`None`)\n* `tf.math.add_n`(**inputs**, name=`None`)\n* `tf.math.add`(**x**, **y**, name=`None`)\n* `tf.math.angle`(**input**, name=`None`)\n* `tf.math.asin`(**x**, name=`None`)\n* `tf.math.asinh`(**x**, name=`None`)\n* `tf.math.atan2`(**y**, **x**, name=`None`)\n* `tf.math.atan`(**x**, name=`None`)\n* `tf.math.atanh`(**x**, name=`None`)\n* `tf.math.bessel_i0`(**x**, name=`None`)\n* `tf.math.bessel_i0e`(**x**, name=`None`)\n* `tf.math.bessel_i1`(**x**, name=`None`)\n* `tf.math.bessel_i1e`(**x**, name=`None`)\n* `tf.math.ceil`(**x**, name=`None`)\n* `tf.math.conj`(**x**, name=`None`)\n* `tf.math.cos`(**x**, name=`None`)\n* `tf.math.cosh`(**x**, name=`None`)\n* `tf.math.digamma`(**x**, name=`None`)\n* `tf.math.divide_no_nan`(**x**, **y**, name=`None`)\n* `tf.math.divide`(**x**, **y**, name=`None`)\n* `tf.math.equal`(**x**, **y**, name=`None`)\n* `tf.math.erf`(**x**, name=`None`)\n* `tf.math.erfc`(**x**, name=`None`)\n* `tf.math.erfcinv`(**x**, name=`None`)\n* `tf.math.erfinv`(**x**, name=`None`)\n* `tf.math.exp`(**x**, name=`None`)\n* `tf.math.expm1`(**x**, name=`None`)\n* `tf.math.floor`(**x**, name=`None`)\n* `tf.math.floordiv`(**x**, **y**, name=`None`)\n* 
`tf.math.floormod`(**x**, **y**, name=`None`)\n* `tf.math.greater_equal`(**x**, **y**, name=`None`)\n* `tf.math.greater`(**x**, **y**, name=`None`)\n* `tf.math.imag`(**input**, name=`None`)\n* `tf.math.is_finite`(**x**, name=`None`)\n* `tf.math.is_inf`(**x**, name=`None`)\n* `tf.math.is_nan`(**x**, name=`None`)\n* `tf.math.less_equal`(**x**, **y**, name=`None`)\n* `tf.math.less`(**x**, **y**, name=`None`)\n* `tf.math.lgamma`(**x**, name=`None`)\n* `tf.math.log1p`(**x**, name=`None`)\n* `tf.math.log_sigmoid`(**x**, name=`None`)\n* `tf.math.log`(**x**, name=`None`)\n* `tf.math.logical_and`(**x**, **y**, name=`None`)\n* `tf.math.logical_not`(**x**, name=`None`)\n* `tf.math.logical_or`(**x**, **y**, name=`None`)\n* `tf.math.logical_xor`(**x**, **y**, name=`'LogicalXor'`)\n* `tf.math.maximum`(**x**, **y**, name=`None`)\n* `tf.math.minimum`(**x**, **y**, name=`None`)\n* `tf.math.multiply_no_nan`(**x**, **y**, name=`None`)\n* `tf.math.multiply`(**x**, **y**, name=`None`)\n* `tf.math.ndtri`(**x**, name=`None`)\n* `tf.math.negative`(**x**, name=`None`)\n* `tf.math.nextafter`(**x1**, x2, name=`None`)\n* `tf.math.not_equal`(**x**, **y**, name=`None`)\n* `tf.math.pow`(**x**, **y**, name=`None`)\n* `tf.math.real`(**input**, name=`None`)\n* `tf.math.reciprocal_no_nan`(**x**, name=`None`)\n* `tf.math.reciprocal`(**x**, name=`None`)\n* `tf.math.reduce_all`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_any`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_max`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_mean`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_min`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_prod`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_std`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_sum`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_variance`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.rint`(**x**, name=`None`)\n* `tf.math.round`(**x**, name=`None`)\n* `tf.math.rsqrt`(**x**, name=`None`)\n* `tf.math.scalar_mul`(**scalar**, **x**, name=`None`)\n* `tf.math.sigmoid`(**x**, name=`None`)\n* `tf.math.sign`(**x**, name=`None`)\n* `tf.math.sin`(**x**, name=`None`)\n* `tf.math.sinh`(**x**, name=`None`)\n* `tf.math.softplus`(**features**, name=`None`)\n* `tf.math.special.bessel_j0`(**x**, name=`None`)\n* `tf.math.special.bessel_j1`(**x**, name=`None`)\n* `tf.math.special.bessel_k0`(**x**, name=`None`)\n* `tf.math.special.bessel_k0e`(**x**, name=`None`)\n* `tf.math.special.bessel_k1`(**x**, name=`None`)\n* `tf.math.special.bessel_k1e`(**x**, name=`None`)\n* `tf.math.special.bessel_y0`(**x**, name=`None`)\n* `tf.math.special.bessel_y1`(**x**, name=`None`)\n* `tf.math.special.dawsn`(**x**, name=`None`)\n* `tf.math.special.expint`(**x**, name=`None`)\n* `tf.math.special.fresnel_cos`(**x**, name=`None`)\n* `tf.math.special.fresnel_sin`(**x**, name=`None`)\n* `tf.math.special.spence`(**x**, name=`None`)\n* `tf.math.sqrt`(**x**, name=`None`)\n* `tf.math.square`(**x**, name=`None`)\n* `tf.math.squared_difference`(**x**, **y**, name=`None`)\n* `tf.math.subtract`(**x**, **y**, name=`None`)\n* `tf.math.tan`(**x**, name=`None`)\n* `tf.math.tanh`(**x**, name=`None`)\n* `tf.math.truediv`(**x**, **y**, name=`None`)\n* `tf.math.unsorted_segment_max`(**data**, **segment_ids**, num_segments, name=`None`)\n* 
`tf.math.unsorted_segment_mean`(**data**, **segment_ids**, num_segments, name=`None`)\n* `tf.math.unsorted_segment_min`(**data**, **segment_ids**, num_segments, name=`None`)\n* `tf.math.unsorted_segment_prod`(**data**, **segment_ids**, num_segments, name=`None`)\n* `tf.math.unsorted_segment_sqrt_n`(**data**, **segment_ids**, num_segments, name=`None`)\n* `tf.math.unsorted_segment_sum`(**data**, **segment_ids**, num_segments, name=`None`)\n* `tf.math.xdivy`(**x**, **y**, name=`None`)\n* `tf.math.xlog1py`(**x**, **y**, name=`None`)\n* `tf.math.xlogy`(**x**, **y**, name=`None`)\n* `tf.math.zeta`(**x**, **q**, name=`None`)\n* `tf.nn.dropout`(**x**, rate, noise_shape=`None`, seed=`None`, name=`None`)\n* `tf.nn.elu`(**features**, name=`None`)\n* `tf.nn.gelu`(**features**, approximate=`False`, name=`None`)\n* `tf.nn.leaky_relu`(**features**, alpha=`0.2`, name=`None`)\n* `tf.nn.relu6`(**features**, name=`None`)\n* `tf.nn.relu`(**features**, name=`None`)\n* `tf.nn.selu`(**features**, name=`None`)\n* `tf.nn.sigmoid_cross_entropy_with_logits`(**labels**=`None`, **logits**=`None`, name=`None`)\n* `tf.nn.silu`(**features**, beta=`1.0`)\n* `tf.nn.softmax`(**logits**, axis=`None`, name=`None`)\n* `tf.nn.softsign`(**features**, name=`None`)\n* `tf.one_hot`(**indices**, depth, on_value=`None`, off_value=`None`, axis=`None`, dtype=`None`, name=`None`)\n* `tf.ones_like`(**input**, dtype=`None`, name=`None`)\n* `tf.print`(***inputs**, **kwargs)\n* `tf.rank`(**input**, name=`None`)\n* `tf.realdiv`(**x**, **y**, name=`None`)\n* `tf.reshape`(**tensor**, **shape**, name=`None`)\n* `tf.reverse`(**tensor**, axis, name=`None`)\n* `tf.size`(**input**, out_type=`tf.int32`, name=`None`)\n* `tf.split`(**value**, num_or_size_splits, axis=`0`, num=`None`, name=`'split'`)\n* `tf.squeeze`(**input**, axis=`None`, name=`None`)\n* `tf.stack`(**values**, axis=`0`, name=`'stack'`)\n* `tf.strings.as_string`(**input**, precision=`-1`, scientific=`False`, shortest=`False`, width=`-1`, fill=`''`, name=`None`)\n* `tf.strings.format`(**template**, **inputs**, placeholder=`'{}'`, summarize=`3`, name=`None`)\n* `tf.strings.join`(**inputs**, separator=`''`, name=`None`)\n* `tf.strings.length`(**input**, unit=`'BYTE'`, name=`None`)\n* `tf.strings.lower`(**input**, encoding=`''`, name=`None`)\n* `tf.strings.reduce_join`(**inputs**, axis=`None`, keepdims=`False`, separator=`''`, name=`None`)\n* `tf.strings.regex_full_match`(**input**, pattern, name=`None`)\n* `tf.strings.regex_replace`(**input**, pattern, rewrite, replace_global=`True`, name=`None`)\n* `tf.strings.strip`(**input**, name=`None`)\n* `tf.strings.substr`(**input**, pos, len, unit=`'BYTE'`, name=`None`)\n* `tf.strings.to_hash_bucket_fast`(**input**, num_buckets, name=`None`)\n* `tf.strings.to_hash_bucket_strong`(**input**, num_buckets, key, name=`None`)\n* `tf.strings.to_hash_bucket`(**input**, num_buckets, name=`None`)\n* `tf.strings.to_number`(**input**, out_type=`tf.float32`, name=`None`)\n* `tf.strings.unicode_script`(**input**, name=`None`)\n* `tf.strings.unicode_transcode`(**input**, input_encoding, output_encoding, errors=`'replace'`, replacement_char=`65533`, replace_control_characters=`False`, name=`None`)\n* `tf.strings.upper`(**input**, encoding=`''`, name=`None`)\n* `tf.tile`(**input**, multiples, name=`None`)\n* `tf.truncatediv`(**x**, **y**, name=`None`)\n* `tf.truncatemod`(**x**, **y**, name=`None`)\n* `tf.where`(**condition**, **x**=`None`, **y**=`None`, name=`None`)\n* `tf.zeros_like`(**input**, dtype=`None`, name=`None`)n\n"], ["tf.RaggedTensor", "description: 
Represents a ragged tensor.", false, "Represents a ragged tensor.\n\n A `RaggedTensor` is a tensor with one or more *ragged dimensions*, which are\n dimensions whose slices may have different lengths. For example, the inner\n (column) dimension of `rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged,\n since the column slices (`rt[0, :]`, ..., `rt[4, :]`) have different lengths.\n Dimensions whose slices all have the same length are called *uniform\n dimensions*. The outermost dimension of a `RaggedTensor` is always uniform,\n since it consists of a single slice (and so there is no possibility for\n differing slice lengths).\n\n The total number of dimensions in a `RaggedTensor` is called its *rank*,\n and the number of ragged dimensions in a `RaggedTensor` is called its\n *ragged-rank*. A `RaggedTensor`'s ragged-rank is fixed at graph creation\n time: it can't depend on the runtime values of `Tensor`s, and can't vary\n dynamically for different session runs.\n\n Note that the `__init__` constructor is private. Please use one of the\n following methods to construct a `RaggedTensor`:\n\n * `tf.RaggedTensor.from_row_lengths`\n * `tf.RaggedTensor.from_value_rowids`\n * `tf.RaggedTensor.from_row_splits`\n * `tf.RaggedTensor.from_row_starts`\n * `tf.RaggedTensor.from_row_limits`\n * `tf.RaggedTensor.from_nested_row_splits`\n * `tf.RaggedTensor.from_nested_row_lengths`\n * `tf.RaggedTensor.from_nested_value_rowids`\n\n ### Potentially Ragged Tensors\n\n Many ops support both `Tensor`s and `RaggedTensor`s\n (see [tf.ragged](https://www.tensorflow.org/api_docs/python/tf/ragged) for a\n full listing). The term \"potentially ragged tensor\" may be used to refer to a\n tensor that might be either a `Tensor` or a `RaggedTensor`. The ragged-rank\n of a `Tensor` is zero.\n\n ### Documenting RaggedTensor Shapes\n\n When documenting the shape of a RaggedTensor, ragged dimensions can be\n indicated by enclosing them in parentheses. For example, the shape of\n a 3-D `RaggedTensor` that stores the fixed-size word embedding for each\n word in a sentence, for each sentence in a batch, could be written as\n `[num_sentences, (num_words), embedding_size]`. The parentheses around\n `(num_words)` indicate that dimension is ragged, and that the length\n of each element list in that dimension may vary for each item.\n\n ### Component Tensors\n\n Internally, a `RaggedTensor` consists of a concatenated list of values that\n are partitioned into variable-length rows. In particular, each `RaggedTensor`\n consists of:\n\n * A `values` tensor, which concatenates the variable-length rows into a\n flattened list. For example, the `values` tensor for\n `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is `[3, 1, 4, 1, 5, 9, 2, 6]`.\n\n * A `row_splits` vector, which indicates how those flattened values are\n divided into rows. In particular, the values for row `rt[i]` are stored\n in the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.\n\n Example:\n\n >>> print(tf.RaggedTensor.from_row_splits(\n ... values=[3, 1, 4, 1, 5, 9, 2, 6],\n ... row_splits=[0, 4, 4, 7, 8, 8]))\n \n\n ### Alternative Row-Partitioning Schemes\n\n In addition to `row_splits`, ragged tensors provide support for five other\n row-partitioning schemes:\n\n * `row_lengths`: a vector with shape `[nrows]`, which specifies the length\n of each row.\n\n * `value_rowids` and `nrows`: `value_rowids` is a vector with shape\n `[nvals]`, corresponding one-to-one with `values`, which specifies\n each value's row index. 
In particular, the row `rt[row]` consists of the\n values `rt.values[j]` where `value_rowids[j]==row`. `nrows` is an\n integer scalar that specifies the number of rows in the\n `RaggedTensor`. (`nrows` is used to indicate trailing empty rows.)\n\n * `row_starts`: a vector with shape `[nrows]`, which specifies the start\n offset of each row. Equivalent to `row_splits[:-1]`.\n\n * `row_limits`: a vector with shape `[nrows]`, which specifies the stop\n offset of each row. Equivalent to `row_splits[1:]`.\n\n * `uniform_row_length`: A scalar tensor, specifying the length of every\n row. This row-partitioning scheme may only be used if all rows have\n the same length.\n\n Example: The following ragged tensors are equivalent, and all represent the\n nested list `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]`.\n\n >>> values = [3, 1, 4, 1, 5, 9, 2, 6]\n >>> RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8])\n <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>\n >>> RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0])\n <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>\n >>> RaggedTensor.from_value_rowids(\n ... values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5)\n <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>\n >>> RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8])\n <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>\n >>> RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8])\n <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>\n >>> RaggedTensor.from_uniform_row_length(values, uniform_row_length=2)\n <tf.RaggedTensor [[3, 1], [4, 1], [5, 9], [2, 6]]>\n\n ### Multiple Ragged Dimensions\n\n `RaggedTensor`s with multiple ragged dimensions can be defined by using\n a nested `RaggedTensor` for the `values` tensor. Each nested `RaggedTensor`\n adds a single ragged dimension.\n\n >>> inner_rt = RaggedTensor.from_row_splits( # =rt1 from above\n ... values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8])\n >>> outer_rt = RaggedTensor.from_row_splits(\n ... values=inner_rt, row_splits=[0, 3, 3, 5])\n >>> print(outer_rt.to_list())\n [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]\n >>> print(outer_rt.ragged_rank)\n 2\n\n The factory function `RaggedTensor.from_nested_row_splits` may be used to\n construct a `RaggedTensor` with multiple ragged dimensions directly, by\n providing a list of `row_splits` tensors:\n\n >>> RaggedTensor.from_nested_row_splits(\n ... flat_values=[3, 1, 4, 1, 5, 9, 2, 6],\n ... nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8])).to_list()\n [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]\n\n ### Uniform Inner Dimensions\n\n `RaggedTensor`s with uniform inner dimensions can be defined\n by using a multidimensional `Tensor` for `values`.\n\n >>> rt = RaggedTensor.from_row_splits(values=tf.ones([5, 3], tf.int32),\n ... row_splits=[0, 2, 5])\n >>> print(rt.to_list())\n [[[1, 1, 1], [1, 1, 1]],\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]]]\n >>> print(rt.shape)\n (2, None, 3)\n\n ### Uniform Outer Dimensions\n\n `RaggedTensor`s with uniform outer dimensions can be defined by using\n one or more `RaggedTensor` with a `uniform_row_length` row-partitioning\n tensor. For example, a `RaggedTensor` with shape `[2, 2, None]` can be\n constructed with this method from a `RaggedTensor` values with shape\n `[4, None]`:\n\n >>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])\n >>> print(values.shape)\n (4, None)\n >>> rt6 = tf.RaggedTensor.from_uniform_row_length(values, 2)\n >>> print(rt6)\n <tf.RaggedTensor [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]>\n >>> print(rt6.shape)\n (2, 2, None)\n\n Note that `rt6` only contains one ragged dimension (the innermost\n dimension). 
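To make the distinction concrete, here is a small runnable sketch (not part of the original docstring) that rebuilds `rt6` and pokes at it; `tf.ragged.constant`, ragged indexing, and `to_list` are standard API:

```python
import tensorflow as tf

values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
rt6 = tf.RaggedTensor.from_uniform_row_length(values, 2)

# The two outer dimensions are uniform, so plain indexing works on them...
print(rt6[1, 0])      # tf.Tensor([5 6], shape=(2,), dtype=int32)
# ...while row lengths along the innermost (ragged) dimension still vary.
print(rt6.to_list())  # [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]
```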
In contrast, if `from_row_splits` is used to construct a similar\n `RaggedTensor`, then that `RaggedTensor` will have two ragged dimensions:\n\n >>> rt7 = tf.RaggedTensor.from_row_splits(values, [0, 2, 4])\n >>> print(rt7.shape)\n (2, None, None)\n\n Uniform and ragged outer dimensions may be interleaved, meaning that a\n tensor with any combination of ragged and uniform dimensions may be created.\n For example, a RaggedTensor `t4` with shape `[3, None, 4, 8, None, 2]` could\n be constructed as follows:\n\n ```python\n t0 = tf.zeros([1000, 2]) # Shape: [1000, 2]\n t1 = RaggedTensor.from_row_lengths(t0, [...]) # [160, None, 2]\n t2 = RaggedTensor.from_uniform_row_length(t1, 8) # [20, 8, None, 2]\n t3 = RaggedTensor.from_uniform_row_length(t2, 4) # [5, 4, 8, None, 2]\n t4 = RaggedTensor.from_row_lengths(t3, [...]) # [3, None, 4, 8, None, 2]\n ```\n\n "], ["tf.RaggedTensorSpec", "description: Type specification for a tf.RaggedTensor.", false, "Type specification for a `tf.RaggedTensor`."], ["tf.random", "description: Public API for tf.random namespace.", true, "Public API for tf.random namespace.\n"], ["tf.random_index_shuffle", "description: Outputs the position of value in a permutation of [0, ..., max_index].", false, "Outputs the position of `value` in a permutation of [0, ..., max_index].\n\n Output values are a bijection of the `index` for any combination of `seed` and `max_index`.\n\n If multiple inputs are vectors (a matrix in the case of `seed`) then the size of the\n first dimension must match.\n\n The outputs are deterministic.\n\n Args:\n index: A `Tensor`. Must be one of the following types: `int32`, `uint32`, `int64`, `uint64`.\n A scalar tensor or a vector of dtype `dtype`. The index (or indices) to be shuffled. Must be within [0, max_index].\n seed: A `Tensor`. Must be one of the following types: `int32`, `uint32`, `int64`, `uint64`.\n A tensor of dtype `Tseed` and shape [3] or [n, 3]. The random seed.\n max_index: A `Tensor`. Must have the same type as `index`.\n A scalar tensor or vector of dtype `dtype`. The upper bound(s) of the interval (inclusive).\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `index`.\n "], ["tf.random_normal_initializer", "description: Initializer that generates tensors with a normal distribution.", false, "Initializer that generates tensors with a normal distribution.\n\n Initializers allow you to pre-specify an initialization strategy, encoded in\n the Initializer object, without knowing the shape and dtype of the variable\n being initialized.\n\n Examples:\n\n >>> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3,\n ... tf.random_normal_initializer(mean=1., stddev=2.))\n >>> v1\n \n >>> v2\n >> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))\n (, >> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.ones_initializer())\n >>> v1\n \n >>> v2\n \n >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))\n (, >> start = 3\n >>> limit = 18\n >>> delta = 3\n >>> tf.range(start, limit, delta)\n <tf.Tensor: shape=(5,), dtype=int32,\n numpy=array([ 3, 6, 9, 12, 15], dtype=int32)>\n\n >>> start = 3\n >>> limit = 1\n >>> delta = -0.5\n >>> tf.range(start, limit, delta)\n <tf.Tensor: shape=(4,), dtype=float32,\n numpy=array([3. , 2.5, 2. , 1.5], dtype=float32)>\n\n >>> limit = 5\n >>> tf.range(limit)\n <tf.Tensor: shape=(5,), dtype=int32, numpy=array([0, 1, 2, 3, 4], dtype=int32)>\n\n Args:\n start: A 0-D `Tensor` (scalar). 
Acts as first entry in the range if `limit`\n is not None; otherwise, acts as range limit and first entry defaults to 0.\n limit: A 0-D `Tensor` (scalar). Upper limit of sequence, exclusive. If None,\n defaults to the value of `start` while the first entry of the range\n defaults to 0.\n delta: A 0-D `Tensor` (scalar). Number that increments `start`. Defaults to\n 1.\n dtype: The type of the elements of the resulting tensor.\n name: A name for the operation. Defaults to \"range\".\n\n Returns:\n A 1-D `Tensor` of type `dtype`.\n\n @compatibility(numpy)\n Equivalent to np.arange\n @end_compatibility\n "], ["tf.rank", "description: Returns the rank of a tensor.", false, "Returns the rank of a tensor.\n\n See also `tf.shape`.\n\n Returns a 0-D `int32` `Tensor` representing the rank of `input`.\n\n For example:\n\n ```python\n # shape of tensor 't' is [2, 2, 3]\n t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n tf.rank(t) # 3\n ```\n\n **Note**: The rank of a tensor is not the same as the rank of a matrix. The\n rank of a tensor is the number of indices required to uniquely select each\n element of the tensor. Rank is also known as \"order\", \"degree\", or \"ndims.\"\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `int32`.\n\n @compatibility(numpy)\n Equivalent to np.ndim\n @end_compatibility\n "], ["tf.raw_ops", "description: Public API for tf.raw_ops namespace.", true, "Public API for tf.raw_ops namespace.\n"], ["tf.realdiv", "description: Returns x / y element-wise for real types.", false, "Returns x / y element-wise for real types.\n\n If `x` and `y` are reals, this will return the floating-point division.\n\n *NOTE*: `Div` supports broadcasting. More about broadcasting\n [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `uint32`, `uint64`, `int64`, `complex64`, `complex128`.\n y: A `Tensor`. Must have the same type as `x`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n "], ["tf.recompute_grad", "description: Defines a function as a recompute-checkpoint for the tape auto-diff.", false, "Defines a function as a recompute-checkpoint for the tape auto-diff.\n\n Tape checkpointing is a technique to reduce the memory consumption of the\n auto-diff tape:\n\n - Without tape checkpointing, operations and intermediate values are\n recorded to the tape for use in the backward pass.\n\n - With tape checkpointing, only the function call and its inputs are\n recorded. During back-propagation the `recompute_grad` custom gradient\n (`tf.custom_gradient`) recomputes the function under a localized Tape object.\n This recomputation of the function during backpropagation performs redundant\n calculation, but reduces the overall memory usage of the Tape.\n\n >>> y = tf.Variable(1.0)\n\n >>> def my_function(x):\n ... tf.print('running')\n ... z = x*y\n ... return z\n\n >>> my_function_recompute = tf.recompute_grad(my_function)\n\n >>> with tf.GradientTape() as tape:\n ... r = tf.constant(1.0)\n ... for i in range(4):\n ... 
r = my_function_recompute(r)\n running\n running\n running\n running\n\n >>> grad = tape.gradient(r, [y])\n running\n running\n running\n running\n\n Without `recompute_grad`, the tape contains all intermediate steps, and no\n recomputation is performed.\n\n >>> with tf.GradientTape() as tape:\n ... r = tf.constant(1.0)\n ... for i in range(4):\n ... r = my_function(r)\n running\n running\n running\n running\n\n >>> grad = tape.gradient(r, [y])\n\n\n If `f` is a `tf.keras` `Model` or `Layer` object, methods and attributes\n such as `f.variables` are not available on the returned function `g`.\n Either keep a reference to `f`, or use `g.__wrapped__` for accessing\n these variables and methods.\n\n\n >>> def print_running_and_return(x):\n ... tf.print(\"running\")\n ... return x\n\n >>> model = tf.keras.Sequential([\n ... tf.keras.layers.Lambda(print_running_and_return),\n ... tf.keras.layers.Dense(2)\n ... ])\n\n >>> model_recompute = tf.recompute_grad(model)\n\n >>> with tf.GradientTape(persistent=True) as tape:\n ... r = tf.constant([[1,2]])\n ... for i in range(4):\n ... r = model_recompute(r)\n running\n running\n running\n running\n\n >>> grad = tape.gradient(r, model.variables)\n running\n running\n running\n running\n\n Alternatively, use the `__wrapped__` attribute to access the original\n model object.\n\n >>> grad = tape.gradient(r, model_recompute.__wrapped__.variables)\n running\n running\n running\n running\n\n\n Args:\n f: function `f(*x)` that returns a `Tensor` or sequence of `Tensor` outputs.\n\n Returns:\n A function `g` wrapping `f` that defines a custom gradient, which recomputes\n `f` on the backwards pass of a gradient call.\n "], ["tf.RegisterGradient", "description: A decorator for registering the gradient function for an op type.", false, "A decorator for registering the gradient function for an op type.\n\n This decorator is only used when defining a new op type. For an op\n with `m` inputs and `n` outputs, the gradient function is a function\n that takes the original `Operation` and `n` `Tensor` objects\n (representing the gradients with respect to each output of the op),\n and returns `m` `Tensor` objects (representing the partial gradients\n with respect to each input of the op).\n\n For example, assuming that operations of type `\"Sub\"` take two\n inputs `x` and `y`, and return a single output `x - y`, the\n following gradient function would be registered:\n\n ```python\n @tf.RegisterGradient(\"Sub\")\n def _sub_grad(unused_op, grad):\n return grad, tf.negative(grad)\n ```\n\n The decorator argument `op_type` is the string type of an\n operation. This corresponds to the `OpDef.name` field for the proto\n that defines the operation.\n "], ["tf.register_tensor_conversion_function", "description: Registers a function for converting objects of base_type to Tensor.", false, "Registers a function for converting objects of `base_type` to `Tensor`.\n\n The conversion function must have the following signature:\n\n ```python\n def conversion_func(value, dtype=None, name=None, as_ref=False):\n # ...\n ```\n\n It must return a `Tensor` with the given `dtype` if specified. If the\n conversion function creates a new `Tensor`, it should use the given\n `name` if specified. All exceptions will be propagated to the caller.\n\n The conversion function may return `NotImplemented` for some\n inputs. 
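For concreteness, here is a minimal sketch of such a registration (the `Duration` class and the `duration_to_tensor` helper are invented for illustration; only `tf.register_tensor_conversion_function` itself is from the API):

```python
import tensorflow as tf

class Duration:
  """Hypothetical wrapper type holding a number of seconds."""
  def __init__(self, seconds):
    self.seconds = seconds

def duration_to_tensor(value, dtype=None, name=None, as_ref=False):
  # This sketch only supports floating-point targets; returning
  # NotImplemented hands the value off to other registered converters.
  if dtype is not None and not dtype.is_floating:
    return NotImplemented
  return tf.constant(value.seconds, dtype=dtype or tf.float32, name=name)

tf.register_tensor_conversion_function(Duration, duration_to_tensor)
print(tf.convert_to_tensor(Duration(90.0)))  # tf.Tensor(90.0, shape=(), dtype=float32)
```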
When a conversion function returns `NotImplemented`, the conversion process\n will continue to try subsequent conversion functions.\n\n If `as_ref` is true, the function must return a `Tensor` reference,\n such as a `Variable`.\n\n NOTE: The conversion functions will execute in order of priority,\n followed by order of registration. To ensure that a conversion function\n `F` runs before another conversion function `G`, ensure that `F` is\n registered with a smaller priority than `G`.\n\n Args:\n base_type: The base type or tuple of base types for all objects that\n `conversion_func` accepts.\n conversion_func: A function that converts instances of `base_type` to\n `Tensor`.\n priority: Optional integer that indicates the priority for applying this\n conversion function. Conversion functions with smaller priority values run\n earlier than conversion functions with larger priority values. Defaults to\n 100.\n\n Raises:\n TypeError: If the arguments do not have the appropriate type.\n "], ["tf.repeat", "description: Repeat elements of input.", false, "Repeat elements of `input`.\n\n See also `tf.concat`, `tf.stack`, `tf.tile`.\n\n Args:\n input: An `N`-dimensional Tensor.\n repeats: A 1-D `int` Tensor. The number of repetitions for each element.\n repeats is broadcasted to fit the shape of the given axis. `len(repeats)`\n must equal `input.shape[axis]` if axis is not None.\n axis: An int. The axis along which to repeat values. By default (axis=None),\n use the flattened input array, and return a flat output array.\n name: A name for the operation.\n\n Returns:\n A Tensor which has the same shape as `input`, except along the given axis.\n If axis is None then the output array is flattened to match the flattened\n input array.\n\n Example usage:\n\n >>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)\n <tf.Tensor: shape=(5,), dtype=string,\n numpy=array([b'a', b'a', b'a', b'c', b'c'], dtype=object)>\n\n >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0)\n <tf.Tensor: shape=(5, 2), dtype=int32, numpy=\n array([[1, 2],\n [1, 2],\n [3, 4],\n [3, 4],\n [3, 4]], dtype=int32)>\n\n >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1)\n <tf.Tensor: shape=(2, 5), dtype=int32, numpy=\n array([[1, 1, 2, 2, 2],\n [3, 3, 4, 4, 4]], dtype=int32)>\n\n >>> repeat(3, repeats=4)\n <tf.Tensor: shape=(4,), dtype=int32, numpy=array([3, 3, 3, 3], dtype=int32)>\n\n >>> repeat([[1,2], [3,4]], repeats=2)\n <tf.Tensor: shape=(8,), dtype=int32,\n numpy=array([1, 1, 2, 2, 3, 3, 4, 4], dtype=int32)>\n\n "], ["tf.required_space_to_batch_paddings", "description: Calculate padding required to make block_shape divide input_shape.", false, "Calculate padding required to make block_shape divide input_shape.\n\n This function can be used to calculate a suitable paddings argument for use\n with space_to_batch_nd and batch_to_space_nd.\n\n Args:\n input_shape: int32 Tensor of shape [N].\n block_shape: int32 Tensor of shape [N].\n base_paddings: Optional int32 Tensor of shape [N, 2]. Specifies the minimum\n amount of padding to use. All elements must be >= 0. If not specified,\n defaults to 0.\n name: string. Optional name prefix.\n\n Returns:\n (paddings, crops), where:\n\n `paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]\n satisfying:\n\n paddings[i, 0] = base_paddings[i, 0].\n 0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]\n (input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0\n\n crops[i, 0] = 0\n crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]\n\n Raises: ValueError if called with incompatible shapes.\n "], ["tf.reshape", "description: Reshapes a tensor.", false, "Reshapes a tensor.\n\n Given `tensor`, this operation returns a new `tf.Tensor` that has the same\n values as `tensor` in the same order, except with a new shape given by\n `shape`.\n\n >>> t1 = [[1, 2, 3],\n ... 
[4, 5, 6]]\n >>> print(tf.shape(t1).numpy())\n [2 3]\n >>> t2 = tf.reshape(t1, [6])\n >>> t2\n <tf.Tensor: shape=(6,), dtype=int32,\n numpy=array([1, 2, 3, 4, 5, 6], dtype=int32)>\n >>> tf.reshape(t2, [3, 2])\n <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n array([[1, 2],\n [3, 4],\n [5, 6]], dtype=int32)>\n\n The `tf.reshape` does not change the order of or the total number of elements\n in the tensor, and so it can reuse the underlying data buffer. This makes it\n a fast operation independent of how big a tensor it is operating on.\n\n >>> tf.reshape([1, 2, 3], [2, 2])\n Traceback (most recent call last):\n ...\n InvalidArgumentError: Input to reshape is a tensor with 3 values, but the\n requested shape has 4\n\n To instead reorder the data to rearrange the dimensions of a tensor, see\n `tf.transpose`.\n\n >>> t = [[1, 2, 3],\n ... [4, 5, 6]]\n >>> tf.reshape(t, [3, 2]).numpy()\n array([[1, 2],\n [3, 4],\n [5, 6]], dtype=int32)\n >>> tf.transpose(t, perm=[1, 0]).numpy()\n array([[1, 4],\n [2, 5],\n [3, 6]], dtype=int32)\n\n If one component of `shape` is the special value -1, the size of that\n dimension is computed so that the total size remains constant. In particular,\n a `shape` of `[-1]` flattens into 1-D. At most one component of `shape` can\n be -1.\n\n >>> t = [[1, 2, 3],\n ... [4, 5, 6]]\n >>> tf.reshape(t, [-1])\n <tf.Tensor: shape=(6,), dtype=int32,\n numpy=array([1, 2, 3, 4, 5, 6], dtype=int32)>\n >>> tf.reshape(t, [3, -1])\n <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n array([[1, 2],\n [3, 4],\n [5, 6]], dtype=int32)>\n >>> tf.reshape(t, [-1, 2])\n <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n array([[1, 2],\n [3, 4],\n [5, 6]], dtype=int32)>\n\n `tf.reshape(t, [])` reshapes a tensor `t` with one element to a scalar.\n\n >>> tf.reshape([7], []).numpy()\n 7\n\n More examples:\n\n >>> t = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n >>> print(tf.shape(t).numpy())\n [9]\n >>> tf.reshape(t, [3, 3])\n <tf.Tensor: shape=(3, 3), dtype=int32, numpy=\n array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]], dtype=int32)>\n\n >>> t = [[[1, 1], [2, 2]],\n ... [[3, 3], [4, 4]]]\n >>> print(tf.shape(t).numpy())\n [2 2 2]\n >>> tf.reshape(t, [2, 4])\n <tf.Tensor: shape=(2, 4), dtype=int32, numpy=\n array([[1, 1, 2, 2],\n [3, 3, 4, 4]], dtype=int32)>\n\n >>> t = [[[1, 1, 1],\n ... [2, 2, 2]],\n ... [[3, 3, 3],\n ... [4, 4, 4]],\n ... [[5, 5, 5],\n ... [6, 6, 6]]]\n >>> print(tf.shape(t).numpy())\n [3 2 3]\n >>> # Pass '[-1]' to flatten 't'.\n >>> tf.reshape(t, [-1])\n <tf.Tensor: shape=(18,), dtype=int32,\n numpy=array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],\n dtype=int32)>\n >>> # -- Using -1 to infer the shape --\n >>> # Here -1 is inferred to be 9:\n >>> tf.reshape(t, [2, -1])\n <tf.Tensor: shape=(2, 9), dtype=int32, numpy=\n array([[1, 1, 1, 2, 2, 2, 3, 3, 3],\n [4, 4, 4, 5, 5, 5, 6, 6, 6]], dtype=int32)>\n >>> # -1 is inferred to be 2:\n >>> tf.reshape(t, [-1, 9])\n <tf.Tensor: shape=(2, 9), dtype=int32, numpy=\n array([[1, 1, 1, 2, 2, 2, 3, 3, 3],\n [4, 4, 4, 5, 5, 5, 6, 6, 6]], dtype=int32)>\n >>> # -1 is inferred to be 3:\n >>> tf.reshape(t, [ 2, -1, 3])\n <tf.Tensor: shape=(2, 3, 3), dtype=int32, numpy=\n array([[[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]],\n [[4, 4, 4],\n [5, 5, 5],\n [6, 6, 6]]], dtype=int32)>\n\n Args:\n tensor: A `Tensor`.\n shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Defines the shape of the output tensor.\n name: Optional string. A name for the operation.\n\n Returns:\n A `Tensor`. Has the same type as `tensor`.\n "], ["tf.reverse", "description: Reverses specific dimensions of a tensor.", false, "Reverses specific dimensions of a tensor.\n\n Given a `tensor` and an `int32` tensor `axis` representing the set of\n dimensions of `tensor` to reverse, this operation reverses each dimension\n `i` for which there exists `j` s.t. `axis[j] == i`.\n\n `tensor` can have up to 8 dimensions. `axis` may contain 0 or more entries. 
If an index is specified more than\n once, an `InvalidArgumentError` is raised.\n\n For example:\n\n ```\n # tensor 't' is [[[[ 0, 1, 2, 3],\n # [ 4, 5, 6, 7],\n # [ 8, 9, 10, 11]],\n # [[12, 13, 14, 15],\n # [16, 17, 18, 19],\n # [20, 21, 22, 23]]]]\n # tensor 't' shape is [1, 2, 3, 4]\n\n # 'dims' is [3] or 'dims' is [-1]\n reverse(t, dims) ==> [[[[ 3, 2, 1, 0],\n [ 7, 6, 5, 4],\n [ 11, 10, 9, 8]],\n [[15, 14, 13, 12],\n [19, 18, 17, 16],\n [23, 22, 21, 20]]]]\n\n # 'dims' is '[1]' (or 'dims' is '[-3]')\n reverse(t, dims) ==> [[[[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]],\n [[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]]]]\n\n # 'dims' is '[2]' (or 'dims' is '[-2]')\n reverse(t, dims) ==> [[[[8, 9, 10, 11],\n [4, 5, 6, 7],\n [0, 1, 2, 3]],\n [[20, 21, 22, 23],\n [16, 17, 18, 19],\n [12, 13, 14, 15]]]]\n ```\n\n Args:\n tensor: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `uint16`, `int16`, `int32`, `uint32`, `int64`, `uint64`, `bool`, `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`, `string`.\n Up to 8-D.\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n 1-D. The indices of the dimensions to reverse. Must be in the range\n `[-rank(tensor), rank(tensor))`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `tensor`.\n "], ["tf.reverse_sequence", "description: Reverses variable length slices.", false, "Reverses variable length slices.\n\n This op first slices `input` along the dimension `batch_axis`, and for\n each slice `i`, reverses the first `seq_lengths[i]` elements along the\n dimension `seq_axis`.\n\n The elements of `seq_lengths` must obey `seq_lengths[i] <=\n input.dims[seq_axis]`, and `seq_lengths` must be a vector of length\n `input.dims[batch_axis]`.\n\n The output slice `i` along dimension `batch_axis` is then given by\n input slice `i`, with the first `seq_lengths[i]` slices along\n dimension `seq_axis` reversed.\n\n Example usage:\n\n >>> seq_lengths = [7, 2, 3, 5]\n >>> input = [[1, 2, 3, 4, 5, 0, 0, 0], [1, 2, 0, 0, 0, 0, 0, 0],\n ... [1, 2, 3, 4, 0, 0, 0, 0], [1, 2, 3, 4, 5, 6, 7, 8]]\n >>> output = tf.reverse_sequence(input, seq_lengths, seq_axis=1, batch_axis=0)\n >>> output\n <tf.Tensor: shape=(4, 8), dtype=int32, numpy=\n array([[0, 0, 5, 4, 3, 2, 1, 0],\n [2, 1, 0, 0, 0, 0, 0, 0],\n [3, 2, 1, 4, 0, 0, 0, 0],\n [5, 4, 3, 2, 1, 6, 7, 8]], dtype=int32)>\n\n Args:\n input: A `Tensor`. The input to reverse.\n seq_lengths: A `Tensor`. Must be one of the following types: `int32`,\n `int64`. 1-D with length `input.dims(batch_axis)` and `max(seq_lengths) <=\n input.dims(seq_axis)`\n seq_axis: An `int`. The dimension which is partially reversed.\n batch_axis: An optional `int`. Defaults to `0`. The dimension along which\n reversal is performed.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor. Has the same type as input.\n "], ["tf.roll", "description: Rolls the elements of a tensor along an axis.", false, "Rolls the elements of a tensor along an axis.\n\n The elements are shifted positively (towards larger indices) by the offset of\n `shift` along the dimension of `axis`. Negative `shift` values will shift\n elements in the opposite direction. Elements that roll past the last position\n will wrap around to the first and vice versa. 
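As a runnable counterpart to the wrap-around description (a sketch; it mirrors the first pseudo-example below):

```python
import tensorflow as tf

t = tf.constant([0, 1, 2, 3, 4])
# Shift by 2: the last two elements wrap around to the front.
print(tf.roll(t, shift=2, axis=0).numpy())  # [3 4 0 1 2]
```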
Multiple shifts along multiple\n axes may be specified.\n\n For example:\n\n ```\n # 't' is [0, 1, 2, 3, 4]\n roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]\n\n # shifting along multiple dimensions\n # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]\n roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]\n\n # shifting along the same axis multiple times\n # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]\n roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]\n ```\n\n Args:\n input: A `Tensor`.\n shift: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which\n elements are shifted positively (towards larger indices) along the dimension\n specified by `axis[i]`. Negative shifts will roll the elements in the opposite\n direction.\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension that the shift\n `shift[i]` should occur. If the same axis is referenced more than once, the\n total shift for that axis will be the sum of all the shifts that belong to that\n axis.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n "], ["tf.saved_model", "description: Public API for tf.saved_model namespace.", true, "Public API for tf.saved_model namespace.\n"], ["tf.scan", "description: scan on the list of tensors unpacked from elems on dimension 0. (deprecated argument values)", false, "scan on the list of tensors unpacked from `elems` on dimension 0. (deprecated argument values)\n\nDeprecated: SOME ARGUMENT VALUES ARE DEPRECATED: `(back_prop=False)`. They will be removed in a future version.\nInstructions for updating:\nback_prop=False is deprecated. Consider using tf.stop_gradient instead.\nInstead of:\nresults = tf.scan(fn, elems, back_prop=False)\nUse:\nresults = tf.nest.map_structure(tf.stop_gradient, tf.scan(fn, elems))\n\nThe simplest version of `scan` repeatedly applies the callable `fn` to a\nsequence of elements from first to last. The elements are made of the tensors\nunpacked from `elems` on dimension 0. The callable fn takes two tensors as\narguments. The first argument is the accumulated value computed from the\npreceding invocation of fn, and the second is the value at the current\nposition of `elems`. If `initializer` is None, `elems` must contain at least\none element, and its first element is used as the initializer.\n\nSuppose that `elems` is unpacked into `values`, a list of tensors. The shape\nof the result tensor is `[len(values)] + fn(initializer, values[0]).shape`.\nIf reverse=True, it's fn(initializer, values[-1]).shape.\n\nThis method also allows multi-arity `elems` and accumulator. If `elems`\nis a (possibly nested) list or tuple of tensors, then each of these tensors\nmust have a matching first (unpack) dimension. 
The second argument of\n`fn` must match the structure of `elems`.\n\nIf no `initializer` is provided, the output structure and dtypes of `fn`\nare assumed to be the same as its input; and in this case, the first\nargument of `fn` must match the structure of `elems`.\n\nIf an `initializer` is provided, then the output of `fn` must have the same\nstructure as `initializer`; and the first argument of `fn` must match\nthis structure.\n\nFor example, if `elems` is `(t1, [t2, t3])` and `initializer` is\n`[i1, i2]` then an appropriate signature for `fn` in `python2` is:\n`fn = lambda (acc_p1, acc_p2), (t1, [t2, t3]):` and `fn` must return a list,\n`[acc_n1, acc_n2]`. An alternative correct signature for `fn`, and the\n one that works in `python3`, is:\n`fn = lambda a, t:`, where `a` and `t` correspond to the input tuples.\n\nArgs:\n fn: The callable to be performed. It accepts two arguments. The first will\n have the same structure as `initializer` if one is provided, otherwise it\n will have the same structure as `elems`. The second will have the same\n (possibly nested) structure as `elems`. Its output must have the same\n structure as `initializer` if one is provided, otherwise it must have the\n same structure as `elems`.\n elems: A tensor or (possibly nested) sequence of tensors, each of which will\n be unpacked along their first dimension. The nested sequence of the\n resulting slices will be the first argument to `fn`.\n initializer: (optional) A tensor or (possibly nested) sequence of tensors,\n initial value for the accumulator, and the expected output type of `fn`.\n parallel_iterations: (optional) The number of iterations allowed to run in\n parallel.\n back_prop: (optional) Deprecated. False disables support for back\n propagation. Prefer using `tf.stop_gradient` instead.\n swap_memory: (optional) True enables GPU-CPU memory swapping.\n infer_shape: (optional) False disables tests for consistent output shapes.\n reverse: (optional) True scans the tensor last to first (instead of first to\n last).\n name: (optional) Name prefix for the returned tensors.\n\nReturns:\n A tensor or (possibly nested) sequence of tensors. 
Each tensor packs the\n results of applying `fn` to tensors unpacked from `elems` along the first\n dimension, and the previous accumulator value(s), from first to last (or\n last to first, if `reverse=True`).\n\nRaises:\n TypeError: if `fn` is not callable or the structure of the output of\n `fn` and `initializer` do not match.\n ValueError: if the lengths of the output of `fn` and `initializer`\n do not match.\n\nExamples:\n ```python\n elems = np.array([1, 2, 3, 4, 5, 6])\n sum = scan(lambda a, x: a + x, elems)\n # sum == [1, 3, 6, 10, 15, 21]\n sum = scan(lambda a, x: a + x, elems, reverse=True)\n # sum == [21, 20, 18, 15, 11, 6]\n ```\n\n ```python\n elems = np.array([1, 2, 3, 4, 5, 6])\n initializer = np.array(0)\n sum_one = scan(\n lambda a, x: x[0] - x[1] + a, (elems + 1, elems), initializer)\n # sum_one == [1, 2, 3, 4, 5, 6]\n ```\n\n ```python\n elems = np.array([1, 0, 0, 0, 0, 0])\n initializer = (np.array(0), np.array(1))\n fibonaccis = scan(lambda a, _: (a[1], a[0] + a[1]), elems, initializer)\n # fibonaccis == ([1, 1, 2, 3, 5, 8], [1, 2, 3, 5, 8, 13])\n ```"], ["tf.scatter_nd", "description: Scatters updates into a tensor of shape shape according to indices.", false, "Scatters `updates` into a tensor of shape `shape` according to `indices`.\n\n Update the input tensor by scattering sparse `updates` according to individual values at the specified `indices`.\n This op returns an `output` tensor with the `shape` you specify. This op is the\n inverse of the `tf.gather_nd` operator which extracts values or slices from a\n given tensor.\n\n This operation is similar to `tf.tensor_scatter_nd_add`, except that the tensor\n is zero-initialized. Calling `tf.scatter_nd(indices, values, shape)`\n is identical to calling\n `tf.tensor_scatter_nd_add(tf.zeros(shape, values.dtype), indices, values)`\n\n If `indices` contains duplicates, the duplicate `values` are accumulated\n (summed).\n\n **WARNING**: The order in which updates are applied is nondeterministic, so the\n output will be nondeterministic if `indices` contains duplicates;\n numbers summed in different order may yield different results because of some\n numerical approximation issues.\n\n `indices` is an integer tensor of shape `shape`. The last dimension\n of `indices` can be at most the rank of `shape`:\n\n indices.shape[-1] <= shape.rank\n\n The last dimension of `indices` corresponds to indices of elements\n (if `indices.shape[-1] = shape.rank`) or slices\n (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of\n `shape`.\n\n `updates` is a tensor with shape:\n\n indices.shape[:-1] + shape[indices.shape[-1]:]\n\n The simplest form of the scatter op is to insert individual elements in\n a tensor by index. Consider an example where you want to insert 4 scattered\n elements in a rank-1 tensor with 8 elements.\n\n
\n\n In Python, this scatter operation would look like this:\n\n ```python\n indices = tf.constant([[4], [3], [1], [7]])\n updates = tf.constant([9, 10, 11, 12])\n shape = tf.constant([8])\n scatter = tf.scatter_nd(indices, updates, shape)\n print(scatter)\n ```\n\n The resulting tensor would look like this:\n\n [0, 11, 0, 10, 9, 0, 0, 12]\n\n You can also insert entire slices of a higher rank tensor all at once. For\n example, you can insert two slices in the first dimension of a rank-3 tensor\n with two matrices of new values.\n\n
\n\n In Python, this scatter operation would look like this:\n\n ```python\n indices = tf.constant([[0], [2]])\n updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]],\n [[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]]])\n shape = tf.constant([4, 4, 4])\n scatter = tf.scatter_nd(indices, updates, shape)\n print(scatter)\n ```\n\n The resulting tensor would look like this:\n\n [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]\n\n Note that on CPU, if an out of bound index is found, an error is returned.\n On GPU, if an out of bound index is found, the index is ignored.\n\n Args:\n indices: A `Tensor`. Must be one of the following types: `int16`, `int32`, `int64`.\n Tensor of indices.\n updates: A `Tensor`. Values to scatter into the output tensor.\n shape: A `Tensor`. Must have the same type as `indices`.\n 1-D. The shape of the output tensor.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `updates`.\n "], ["tf.searchsorted", "description: Searches for where a value would go in a sorted sequence.", false, "Searches for where a value would go in a sorted sequence.\n\n This is not a method for checking containment (like python `in`).\n\n The typical use case for this operation is \"binning\", \"bucketing\", or\n \"discretizing\". The `values` are assigned to bucket-indices based on the\n **edges** listed in `sorted_sequence`. This operation\n returns the bucket-index for each value.\n\n >>> edges = [-1, 3.3, 9.1, 10.0]\n >>> values = [0.0, 4.1, 12.0]\n >>> tf.searchsorted(edges, values).numpy()\n array([1, 2, 4], dtype=int32)\n\n The `side` argument controls which index is returned if a value lands exactly\n on an edge:\n\n >>> seq = [0, 3, 9, 10, 10]\n >>> values = [0, 4, 10]\n >>> tf.searchsorted(seq, values).numpy()\n array([0, 2, 3], dtype=int32)\n >>> tf.searchsorted(seq, values, side=\"right\").numpy()\n array([1, 2, 5], dtype=int32)\n\n The `axis` is not settable for this operation. It always operates on the\n innermost dimension (`axis=-1`). The operation will accept any number of\n outer dimensions. Here it is applied to the rows of a matrix:\n\n >>> sorted_sequence = [[0., 3., 8., 9., 10.],\n ... [1., 2., 3., 4., 5.]]\n >>> values = [[9.8, 2.1, 4.3],\n ... [0.1, 6.6, 4.5, ]]\n >>> tf.searchsorted(sorted_sequence, values).numpy()\n array([[4, 1, 2],\n [0, 5, 4]], dtype=int32)\n\n Note: This operation assumes that `sorted_sequence` **is sorted** along the\n innermost axis, maybe using `tf.sort(..., axis=-1)`. **If the sequence is not\n sorted no error is raised** and the content of the returned tensor is not well\n defined.\n\n Args:\n sorted_sequence: N-D `Tensor` containing a sorted sequence.\n values: N-D `Tensor` containing the search values.\n side: 'left' or 'right'; 'left' corresponds to lower_bound and 'right' to\n upper_bound.\n out_type: The output type (`int32` or `int64`). Default is `tf.int32`.\n name: Optional name for the operation.\n\n Returns:\n An N-D `Tensor` the size of `values` containing the result of applying\n either lower_bound or upper_bound (depending on side) to each value. 
The\n result is not a global index to the entire `Tensor`, but the index in the\n last dimension.\n\n Raises:\n ValueError: If the last dimension of `sorted_sequence` has more than\n `2^31-1` elements.\n If the total size of `values` exceeds `2^31 - 1` elements.\n If the first `N-1` dimensions of the two tensors don't match.\n "], ["tf.sequence_mask", "description: Returns a mask tensor representing the first N positions of each cell.", false, "Returns a mask tensor representing the first N positions of each cell.\n\n If `lengths` has shape `[d_1, d_2, ..., d_n]` the resulting tensor `mask` has\n dtype `dtype` and shape `[d_1, d_2, ..., d_n, maxlen]`, with\n\n ```\n mask[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])\n ```\n\n Examples:\n\n ```python\n tf.sequence_mask([1, 3, 2], 5) # [[True, False, False, False, False],\n # [True, True, True, False, False],\n # [True, True, False, False, False]]\n\n tf.sequence_mask([[1, 3],[2,0]]) # [[[True, False, False],\n # [True, True, True]],\n # [[True, True, False],\n # [False, False, False]]]\n ```\n\n Args:\n lengths: integer tensor, all its values <= maxlen.\n maxlen: scalar integer tensor, size of last dimension of returned tensor.\n Default is the maximum value in `lengths`.\n dtype: output type of the resulting tensor.\n name: name of the op.\n\n Returns:\n A mask tensor of shape `lengths.shape + (maxlen,)`, cast to specified dtype.\n Raises:\n ValueError: if `maxlen` is not a scalar.\n "], ["tf.sets", "description: Tensorflow set operations.", true, "Tensorflow set operations.\n"], ["tf.shape", "description: Returns a tensor containing the shape of the input tensor.", false, "Returns a tensor containing the shape of the input tensor.\n\n See also `tf.size`, `tf.rank`.\n\n `tf.shape` returns a 1-D integer tensor representing the shape of `input`.\n For a scalar input, the tensor returned has a shape of (0,) and its value is\n the empty vector (i.e. []).\n\n For example:\n\n >>> tf.shape(1.)\n <tf.Tensor: shape=(0,), dtype=int32, numpy=array([], dtype=int32)>\n\n >>> t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n >>> tf.shape(t)\n <tf.Tensor: shape=(3,), dtype=int32, numpy=array([2, 2, 3], dtype=int32)>\n\n Note: When using symbolic tensors, such as when using the Keras API,\n tf.shape() will return the shape of the symbolic tensor.\n\n >>> a = tf.keras.layers.Input((None, 10))\n >>> tf.shape(a)\n <... shape=(3,) dtype=int32...>\n\n In these cases, using `tf.Tensor.shape` will return more informative results.\n\n >>> a.shape\n TensorShape([None, None, 10])\n\n (The first `None` represents the as yet unknown batch size.)\n\n `tf.shape` and `Tensor.shape` should be identical in eager mode. Within\n `tf.function` or within a `compat.v1` context, not all dimensions may be\n known until execution time. Hence when defining custom layers and models\n for graph mode, prefer the dynamic `tf.shape(x)` over the static `x.shape`.\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n out_type: (Optional) The specified output type of the operation (`int32` or\n `int64`). 
Defaults to `tf.int32`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `out_type`.\n "], ["tf.shape_n", "description: Returns shape of tensors.", false, "Returns shape of tensors.\n\n Args:\n input: A list of at least 1 `Tensor` object with the same type.\n out_type: The specified output type of the operation (`int32` or `int64`).\n Defaults to `tf.int32`(optional).\n name: A name for the operation (optional).\n\n Returns:\n A list with the same length as `input` of `Tensor` objects with\n type `out_type`.\n "], ["tf.signal", "description: Signal processing operations.", true, "Signal processing operations.\n\nSee the [tf.signal](https://tensorflow.org/api_guides/python/contrib.signal)\nguide.\n\n@@frame\n@@hamming_window\n@@hann_window\n@@inverse_stft\n@@inverse_stft_window_fn\n@@mfccs_from_log_mel_spectrograms\n@@linear_to_mel_weight_matrix\n@@overlap_and_add\n@@stft\n\n[hamming]: https://en.wikipedia.org/wiki/Window_function#Hamming_window\n[hann]: https://en.wikipedia.org/wiki/Window_function#Hann_window\n[mel]: https://en.wikipedia.org/wiki/Mel_scale\n[mfcc]: https://en.wikipedia.org/wiki/Mel-frequency_cepstrum\n[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform\n\n"], ["tf.size", "description: Returns the size of a tensor.", false, "Returns the size of a tensor.\n\n See also `tf.shape`.\n\n Returns a 0-D `Tensor` representing the number of elements in `input`\n of type `out_type`. Defaults to tf.int32.\n\n For example:\n\n >>> t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n >>> tf.size(t)\n \n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n out_type: (Optional) The specified non-quantized numeric output type of the\n operation. Defaults to `tf.int32`.\n\n Returns:\n A `Tensor` of type `out_type`. Defaults to `tf.int32`.\n\n @compatibility(numpy)\n Equivalent to np.size()\n @end_compatibility\n "], ["tf.slice", "description: Extracts a slice from a tensor.", false, "Extracts a slice from a tensor.\n\n See also `tf.strided_slice`.\n\n This operation extracts a slice of size `size` from a tensor `input_` starting\n at the location specified by `begin`. The slice `size` is represented as a\n tensor shape, where `size[i]` is the number of elements of the 'i'th dimension\n of `input_` that you want to slice. The starting location (`begin`) for the\n slice is represented as an offset in each dimension of `input_`. In other\n words, `begin[i]` is the offset into the i'th dimension of `input_` that you\n want to slice from.\n\n Note that `tf.Tensor.__getitem__` is typically a more pythonic way to\n perform slices, as it allows you to write `foo[3:7, :-2]` instead of\n `tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.\n\n `begin` is zero-based; `size` is one-based. If `size[i]` is -1,\n all remaining elements in dimension i are included in the\n slice. 
In other words, this is equivalent to setting:\n\n `size[i] = input_.dim_size(i) - begin[i]`\n\n This operation requires that:\n\n `0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`\n\n For example:\n\n ```python\n t = tf.constant([[[1, 1, 1], [2, 2, 2]],\n [[3, 3, 3], [4, 4, 4]],\n [[5, 5, 5], [6, 6, 6]]])\n tf.slice(t, [1, 0, 0], [1, 1, 3]) # [[[3, 3, 3]]]\n tf.slice(t, [1, 0, 0], [1, 2, 3]) # [[[3, 3, 3],\n # [4, 4, 4]]]\n tf.slice(t, [1, 0, 0], [2, 1, 3]) # [[[3, 3, 3]],\n # [[5, 5, 5]]]\n ```\n\n Args:\n input_: A `Tensor`.\n begin: An `int32` or `int64` `Tensor`.\n size: An `int32` or `int64` `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` the same type as `input_`.\n "], ["tf.sort", "description: Sorts a tensor.", false, "Sorts a tensor.\n\n Usage:\n\n >>> a = [1, 10, 26.9, 2.8, 166.32, 62.3]\n >>> tf.sort(a).numpy()\n array([ 1. , 2.8 , 10. , 26.9 , 62.3 , 166.32], dtype=float32)\n\n >>> tf.sort(a, direction='DESCENDING').numpy()\n array([166.32, 62.3 , 26.9 , 10. , 2.8 , 1. ], dtype=float32)\n\n For multidimensional inputs you can control which axis the sort is applied\n along. The default `axis=-1` sorts the innermost axis.\n\n >>> mat = [[3,2,1],\n ... [2,1,3],\n ... [1,3,2]]\n >>> tf.sort(mat, axis=-1).numpy()\n array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]], dtype=int32)\n >>> tf.sort(mat, axis=0).numpy()\n array([[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]], dtype=int32)\n\n See also:\n\n * `tf.argsort`: Like sort, but it returns the sort indices.\n * `tf.math.top_k`: A partial sort that returns a fixed number of top values\n and corresponding indices.\n\n\n Args:\n values: 1-D or higher **numeric** `Tensor`.\n axis: The axis along which to sort. The default is -1, which sorts the last\n axis.\n direction: The direction in which to sort the values (`'ASCENDING'` or\n `'DESCENDING'`).\n name: Optional name for the operation.\n\n Returns:\n A `Tensor` with the same dtype and shape as `values`, with the elements\n sorted along the given `axis`.\n\n Raises:\n tf.errors.InvalidArgumentError: If the `values.dtype` is not a `float` or\n `int` type.\n ValueError: If axis is not a constant scalar, or the direction is invalid.\n "], ["tf.space_to_batch", "description: SpaceToBatch for N-D tensors of type T.", false, "SpaceToBatch for N-D tensors of type T.\n\n This operation divides \"spatial\" dimensions `[1, ..., M]` of the input into a\n grid of blocks of shape `block_shape`, and interleaves these blocks with the\n \"batch\" dimension (0) such that in the output, the spatial dimensions\n `[1, ..., M]` correspond to the position within the grid, and the batch\n dimension combines both the position within a spatial block and the original\n batch position. Prior to division into blocks, the spatial dimensions of the\n input are optionally zero padded according to `paddings`. See below for a\n precise description.\n\n This operation is equivalent to the following steps:\n\n 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the\n input according to `paddings` to produce `padded` of shape `padded_shape`.\n\n 2. Reshape `padded` to `reshaped_padded` of shape:\n\n [batch] +\n [padded_shape[1] / block_shape[0],\n block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1],\n block_shape[M-1]] +\n remaining_shape\n\n 3. Permute dimensions of `reshaped_padded` to produce\n `permuted_reshaped_padded` of shape:\n\n block_shape +\n [batch] +\n [padded_shape[1] / block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1]] +\n remaining_shape\n\n 4. 
Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch\n dimension, producing an output tensor of shape:\n\n [batch * prod(block_shape)] +\n [padded_shape[1] / block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1]] +\n remaining_shape\n\n Some examples:\n\n (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n ```\n x = [[[[1], [2]], [[3], [4]]]]\n ```\n\n The output tensor has shape `[4, 1, 1, 1]` and value:\n\n ```\n [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n ```\n\n (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n ```\n x = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n ```\n\n The output tensor has shape `[4, 1, 1, 3]` and value:\n\n ```\n [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]\n ```\n\n (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n ```\n x = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n ```\n\n The output tensor has shape `[4, 2, 2, 1]` and value:\n\n ```\n x = [[[[1], [3]], [[9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n ```\n\n (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and\n paddings = `[[0, 0], [2, 0]]`:\n\n ```\n x = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]]],\n [[[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n ```\n\n The output tensor has shape `[8, 1, 3, 1]` and value:\n\n ```\n x = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n [[[0], [2], [4]]], [[[0], [10], [12]]],\n [[[0], [5], [7]]], [[[0], [13], [15]]],\n [[[0], [6], [8]]], [[[0], [14], [16]]]]\n ```\n\n Among others, this operation is useful for reducing atrous convolution into\n regular convolution.\n\n Args:\n input: A `Tensor`.\n N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\n where spatial_shape has `M` dimensions.\n block_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n 1-D with shape `[M]`, all values must be >= 1.\n paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n 2-D with shape `[M, 2]`, all values must be >= 0.\n `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension\n `i + 1`, which corresponds to spatial dimension `i`. It is required that\n `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n "], ["tf.space_to_batch_nd", "description: SpaceToBatch for N-D tensors of type T.", false, "SpaceToBatch for N-D tensors of type T.\n\n This operation divides \"spatial\" dimensions `[1, ..., M]` of the input into a\n grid of blocks of shape `block_shape`, and interleaves these blocks with the\n \"batch\" dimension (0) such that in the output, the spatial dimensions\n `[1, ..., M]` correspond to the position within the grid, and the batch\n dimension combines both the position within a spatial block and the original\n batch position. Prior to division into blocks, the spatial dimensions of the\n input are optionally zero padded according to `paddings`. See below for a\n precise description.\n\n This operation is equivalent to the following steps:\n\n 1. 
Zero-pad the start and end of dimensions `[1, ..., M]` of the\n input according to `paddings` to produce `padded` of shape `padded_shape`.\n\n 2. Reshape `padded` to `reshaped_padded` of shape:\n\n [batch] +\n [padded_shape[1] / block_shape[0],\n block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1],\n block_shape[M-1]] +\n remaining_shape\n\n 3. Permute dimensions of `reshaped_padded` to produce\n `permuted_reshaped_padded` of shape:\n\n block_shape +\n [batch] +\n [padded_shape[1] / block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1]] +\n remaining_shape\n\n 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch\n dimension, producing an output tensor of shape:\n\n [batch * prod(block_shape)] +\n [padded_shape[1] / block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1]] +\n remaining_shape\n\n Some examples:\n\n (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n ```\n x = [[[[1], [2]], [[3], [4]]]]\n ```\n\n The output tensor has shape `[4, 1, 1, 1]` and value:\n\n ```\n [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n ```\n\n (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n ```\n x = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n ```\n\n The output tensor has shape `[4, 1, 1, 3]` and value:\n\n ```\n [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]\n ```\n\n (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n ```\n x = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n ```\n\n The output tensor has shape `[4, 2, 2, 1]` and value:\n\n ```\n x = [[[[1], [3]], [[9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n ```\n\n (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and\n paddings = `[[0, 0], [2, 0]]`:\n\n ```\n x = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]]],\n [[[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n ```\n\n The output tensor has shape `[8, 1, 3, 1]` and value:\n\n ```\n x = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n [[[0], [2], [4]]], [[[0], [10], [12]]],\n [[[0], [5], [7]]], [[[0], [13], [15]]],\n [[[0], [6], [8]]], [[[0], [14], [16]]]]\n ```\n\n Among others, this operation is useful for reducing atrous convolution into\n regular convolution.\n\n Args:\n input: A `Tensor`.\n N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\n where spatial_shape has `M` dimensions.\n block_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n 1-D with shape `[M]`, all values must be >= 1.\n paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n 2-D with shape `[M, 2]`, all values must be >= 0.\n `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension\n `i + 1`, which corresponds to spatial dimension `i`. It is required that\n `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `input`.\n "], ["tf.sparse", "description: Sparse Tensor Representation.", true, "Sparse Tensor Representation.\n\nSee also `tf.sparse.SparseTensor`.\n\n"], ["tf.SparseTensorSpec", "description: Type specification for a tf.sparse.SparseTensor.", false, "Type specification for a `tf.sparse.SparseTensor`."], ["tf.split", "description: Splits a tensor value into a list of sub tensors.", false, "Splits a tensor `value` into a list of sub tensors.\n\n See also `tf.unstack`.\n\n If `num_or_size_splits` is an `int`, then it splits `value` along the\n dimension `axis` into `num_or_size_splits` smaller tensors. This requires that\n `value.shape[axis]` is divisible by `num_or_size_splits`.\n\n If `num_or_size_splits` is a 1-D Tensor (or list), then `value` is split into\n `len(num_or_size_splits)` elements. The shape of the `i`-th\n element has the same size as the `value` except along dimension `axis` where\n the size is `num_or_size_splits[i]`.\n\n For example:\n\n >>> x = tf.Variable(tf.random.uniform([5, 30], -1, 1))\n >>>\n >>> # Split `x` into 3 tensors along dimension 1\n >>> s0, s1, s2 = tf.split(x, num_or_size_splits=3, axis=1)\n >>> tf.shape(s0).numpy()\n array([ 5, 10], dtype=int32)\n >>>\n >>> # Split `x` into 3 tensors with sizes [4, 15, 11] along dimension 1\n >>> split0, split1, split2 = tf.split(x, [4, 15, 11], 1)\n >>> tf.shape(split0).numpy()\n array([5, 4], dtype=int32)\n >>> tf.shape(split1).numpy()\n array([ 5, 15], dtype=int32)\n >>> tf.shape(split2).numpy()\n array([ 5, 11], dtype=int32)\n\n Args:\n value: The `Tensor` to split.\n num_or_size_splits: Either an `int` indicating the number of splits\n along `axis` or a 1-D integer `Tensor` or Python list containing the sizes\n of each output tensor along `axis`. If an `int`, then it must evenly\n divide `value.shape[axis]`; otherwise the sum of sizes along the split\n axis must match that of the `value`.\n axis: An `int` or scalar `int32` `Tensor`. The dimension along which\n to split. Must be in the range `[-rank(value), rank(value))`. Defaults to\n 0.\n num: Optional, an `int`, used to specify the number of outputs when it\n cannot be inferred from the shape of `size_splits`.\n name: A name for the operation (optional).\n\n Returns:\n if `num_or_size_splits` is an `int` returns a list of\n `num_or_size_splits` `Tensor` objects; if `num_or_size_splits` is a 1-D\n list or 1-D `Tensor` returns `num_or_size_splits.get_shape[0]`\n `Tensor` objects resulting from splitting `value`.\n\n Raises:\n ValueError: If `num` is unspecified and cannot be inferred.\n ValueError: If `num_or_size_splits` is a scalar `Tensor`.\n "], ["tf.squeeze", "description: Removes dimensions of size 1 from the shape of a tensor.", false, "Removes dimensions of size 1 from the shape of a tensor.\n\n Given a tensor `input`, this operation returns a tensor of the same type with\n all dimensions of size 1 removed. 
If you don't want to remove all size 1\n dimensions, you can remove specific size 1 dimensions by specifying\n `axis`.\n\n For example:\n\n ```python\n # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\n tf.shape(tf.squeeze(t)) # [2, 3]\n ```\n\n Or, to remove specific size 1 dimensions:\n\n ```python\n # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\n tf.shape(tf.squeeze(t, [2, 4])) # [1, 2, 3, 1]\n ```\n\n Unlike the older op `tf.compat.v1.squeeze`, this op does not accept a\n deprecated `squeeze_dims` argument.\n\n Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)`\n time, where `N` is the number of elements in the squeezed dimensions.\n\n Args:\n input: A `Tensor`. The `input` to squeeze.\n axis: An optional list of `ints`. Defaults to `[]`. If specified, only\n squeezes the dimensions listed. The dimension index starts at 0. It is an\n error to squeeze a dimension that is not 1. Must be in the range\n `[-rank(input), rank(input))`. Must be specified if `input` is a\n `RaggedTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n Contains the same data as `input`, but has one or more dimensions of\n size 1 removed.\n\n Raises:\n ValueError: The input cannot be converted to a tensor, or the specified\n axis cannot be squeezed.\n "], ["tf.stack", "description: Stacks a list of rank-R tensors into one rank-(R+1) tensor.", false, "Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.\n\n See also `tf.concat`, `tf.tile`, `tf.repeat`.\n\n Packs the list of tensors in `values` into a tensor with rank one higher than\n each tensor in `values`, by packing them along the `axis` dimension.\n Given a list of length `N` of tensors of shape `(A, B, C)`;\n\n if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.\n if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.\n Etc.\n\n For example:\n\n >>> x = tf.constant([1, 4])\n >>> y = tf.constant([2, 5])\n >>> z = tf.constant([3, 6])\n >>> tf.stack([x, y, z])\n <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n array([[1, 4],\n [2, 5],\n [3, 6]], dtype=int32)>\n >>> tf.stack([x, y, z], axis=1)\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[1, 2, 3],\n [4, 5, 6]], dtype=int32)>\n\n This is the opposite of unstack. The numpy equivalent is `np.stack`\n\n >>> np.array_equal(np.stack([x, y, z]), tf.stack([x, y, z]))\n True\n\n Args:\n values: A list of `Tensor` objects with the same shape and type.\n axis: An `int`. The axis to stack along. Defaults to the first dimension.\n Negative values wrap around, so the valid range is `[-(R+1), R+1)`.\n name: A name for this operation (optional).\n\n Returns:\n output: A stacked `Tensor` with the same type as `values`.\n\n Raises:\n ValueError: If `axis` is out of the range [-(R+1), R+1).\n "], ["tf.stop_gradient", "description: Stops gradient computation.", false, "Stops gradient computation.\n\n When executed in a graph, this op outputs its input tensor as-is.\n\n When building ops to compute gradients, this op prevents the contribution of\n its inputs from being taken into account. Normally, the gradient generator adds ops\n to a graph to compute the derivatives of a specified 'loss' by recursively\n finding out inputs that contributed to its computation. If you insert this op\n in the graph, its inputs are masked from the gradient generator. They are not\n taken into account for computing gradients.\n\n This is useful any time you want to compute a value with TensorFlow but need\n to pretend that the value was a constant. 
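A minimal sketch of that masking (an invented example, not from this docstring): with the second factor wrapped in `tf.stop_gradient`, the tape treats it as the constant `3.0`, so the gradient is `3.0` rather than `2*x = 6.0`.

```python
import tensorflow as tf

x = tf.Variable(3.0)
with tf.GradientTape() as tape:
  y = x * tf.stop_gradient(x)  # the stopped factor contributes no gradient
print(tape.gradient(y, x))     # tf.Tensor(3.0, shape=(), dtype=float32)
```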
For example, the softmax function\n for a vector x can be written as\n\n ```python\n\n def softmax(x):\n numerator = tf.exp(x)\n denominator = tf.reduce_sum(numerator)\n return numerator / denominator\n ```\n\n This, however, is susceptible to overflow if the values in x are large. An\n alternative, more stable, way is to subtract the maximum of x from each of the\n values.\n\n ```python\n\n def stable_softmax(x):\n z = x - tf.reduce_max(x)\n numerator = tf.exp(z)\n denominator = tf.reduce_sum(numerator)\n return numerator / denominator\n ```\n\n However, when we backprop through the softmax to x, we don't want to backprop\n through the `tf.reduce_max(x)` calculation (if the max values are not unique,\n the gradient could flow to the wrong input); we want to treat it as a\n constant. Therefore, we should write this out as\n\n ```python\n\n def stable_softmax(x):\n z = x - tf.stop_gradient(tf.reduce_max(x))\n numerator = tf.exp(z)\n denominator = tf.reduce_sum(numerator)\n return numerator / denominator\n ```\n\n Some other examples include:\n\n * The *EM* algorithm where the *M-step* should not involve backpropagation\n through the output of the *E-step*.\n * Contrastive divergence training of Boltzmann machines where, when\n differentiating the energy function, the training must not backpropagate\n through the graph that generated the samples from the model.\n * Adversarial training, where no backprop should happen through the adversarial\n example generation process.\n\n Args:\n input: A `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n "], ["tf.strided_slice", "description: Extracts a strided slice of a tensor (generalized Python array indexing).", false, "Extracts a strided slice of a tensor (generalized Python array indexing).\n\n See also `tf.slice`.\n\n **Instead of calling this op directly most users will want to use the\n NumPy-style slicing syntax (e.g. `tensor[..., 3:4:-1, tf.newaxis, 3]`), which\n is supported via `tf.Tensor.__getitem__` and `tf.Variable.__getitem__`.**\n The interface of this op is a low-level encoding of the slicing syntax.\n\n Roughly speaking, this op extracts a slice of size `(end-begin)/stride`\n from the given `input_` tensor. Starting at the location specified by `begin`,\n the slice continues by adding `stride` to the index until all dimensions are\n not less than `end`.\n Note that a stride can be negative, which causes a reverse slice.\n\n Given a Python slice `input[spec0, spec1, ..., specn]`,\n this function will be called as follows.\n\n `begin`, `end`, and `strides` will be vectors of length n.\n n in general is not equal to the rank of the `input_` tensor.\n\n In each mask field (`begin_mask`, `end_mask`, `ellipsis_mask`,\n `new_axis_mask`, `shrink_axis_mask`) the ith bit will correspond to\n the ith spec.\n\n If the ith bit of `begin_mask` is set, `begin[i]` is ignored and\n the fullest possible range in that dimension is used instead.\n `end_mask` works analogously, except with the end range.\n\n `foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.\n `foo[::-1]` reverses a tensor with shape 8.\n\n If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions\n as needed will be inserted between other dimensions. 
Only one\n non-zero bit is allowed in `ellipsis_mask`.\n\n For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is\n equivalent to `foo[3:5,:,:,4:5]` and\n `foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.\n\n If the ith bit of `new_axis_mask` is set, then `begin`,\n `end`, and `stride` are ignored and a new length 1 dimension is\n added at this point in the output tensor.\n\n For example,\n `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.\n\n If the ith bit of `shrink_axis_mask` is set, it implies that the ith\n specification shrinks the dimensionality by 1, taking on the value at index\n `begin[i]`. `end[i]` and `strides[i]` are ignored in this case. For example in\n Python one might do `foo[:, 3, :]` which would result in `shrink_axis_mask`\n equal to 2.\n\n\n NOTE: `begin` and `end` are zero-indexed.\n `strides` entries must be non-zero.\n\n\n ```python\n t = tf.constant([[[1, 1, 1], [2, 2, 2]],\n [[3, 3, 3], [4, 4, 4]],\n [[5, 5, 5], [6, 6, 6]]])\n tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1]) # [[[3, 3, 3]]]\n tf.strided_slice(t, [1, 0, 0], [2, 2, 3], [1, 1, 1]) # [[[3, 3, 3],\n # [4, 4, 4]]]\n tf.strided_slice(t, [1, -1, 0], [2, -3, 3], [1, -1, 1]) # [[[4, 4, 4],\n # [3, 3, 3]]]\n ```\n\n Args:\n input_: A `Tensor`.\n begin: An `int32` or `int64` `Tensor`.\n end: An `int32` or `int64` `Tensor`.\n strides: An `int32` or `int64` `Tensor`.\n begin_mask: An `int32` mask.\n end_mask: An `int32` mask.\n ellipsis_mask: An `int32` mask.\n new_axis_mask: An `int32` mask.\n shrink_axis_mask: An `int32` mask.\n var: The variable corresponding to `input_` or None\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` the same type as `input`.\n "], ["tf.strings", "description: Operations for working with string Tensors.", true, "Operations for working with string Tensors.\n"], ["tf.summary", "description: Operations for writing summary data, for use in analysis and visualization.", true, "Operations for writing summary data, for use in analysis and visualization.\n\nThe `tf.summary` module provides APIs for writing summary data. 
This data can be\nvisualized in TensorBoard, the visualization toolkit that comes with TensorFlow.\nSee the [TensorBoard website](https://www.tensorflow.org/tensorboard) for more\ndetailed tutorials about how to use these APIs, or some quick examples below.\n\nExample usage with eager execution, the default in TF 2.0:\n\n```python\nwriter = tf.summary.create_file_writer(\"/tmp/mylogs\")\nwith writer.as_default():\n for step in range(100):\n # other model code would go here\n tf.summary.scalar(\"my_metric\", 0.5, step=step)\n writer.flush()\n```\n\nExample usage with `tf.function` graph execution:\n\n```python\nwriter = tf.summary.create_file_writer(\"/tmp/mylogs\")\n\n@tf.function\ndef my_func(step):\n # other model code would go here\n with writer.as_default():\n tf.summary.scalar(\"my_metric\", 0.5, step=step)\n\nfor step in range(100):\n my_func(step)\n writer.flush()\n```\n\nExample usage with legacy TF 1.x graph execution:\n\n```python\nwith tf.compat.v1.Graph().as_default():\n step = tf.Variable(0, dtype=tf.int64)\n step_update = step.assign_add(1)\n writer = tf.summary.create_file_writer(\"/tmp/mylogs\")\n with writer.as_default():\n tf.summary.scalar(\"my_metric\", 0.5, step=step)\n all_summary_ops = tf.compat.v1.summary.all_v2_summary_ops()\n writer_flush = writer.flush()\n\n sess = tf.compat.v1.Session()\n sess.run([writer.init(), step.initializer])\n for i in range(100):\n sess.run(all_summary_ops)\n sess.run(step_update)\n sess.run(writer_flush)\n```\n"], ["tf.switch_case", "description: Create a switch/case operation, i.e. an integer-indexed conditional.", false, "Create a switch/case operation, i.e. an integer-indexed conditional.\n\n See also `tf.case`.\n\n This op can be substantially more efficient than `tf.case` when exactly one\n branch will be selected. `tf.switch_case` is more like a C++ switch/case\n statement than `tf.case`, which is more like an if/elif/elif/else chain.\n\n The `branch_fns` parameter is either a dict from `int` to callables, or list\n of (`int`, callable) pairs, or simply a list of callables (in which case the\n index is implicitly the key). The `branch_index` `Tensor` is used to select an\n element in `branch_fns` with matching `int` key, falling back to `default`\n if none match, or `max(keys)` if no `default` is provided. The keys must form\n a contiguous set from `0` to `len(branch_fns) - 1`.\n\n `tf.switch_case` supports nested structures as implemented in `tf.nest`. All\n callables must return the same (possibly nested) value structure of lists,\n tuples, and/or named tuples.\n\n **Example:**\n\n Pseudocode:\n\n ```c++\n switch (branch_index) { // c-style switch\n case 0: return 17;\n case 1: return 31;\n default: return -1;\n }\n ```\n or\n ```python\n branches = {0: lambda: 17, 1: lambda: 31}\n branches.get(branch_index, lambda: -1)()\n ```\n\n Expressions:\n\n ```python\n def f1(): return tf.constant(17)\n def f2(): return tf.constant(31)\n def f3(): return tf.constant(-1)\n r = tf.switch_case(branch_index, branch_fns={0: f1, 1: f2}, default=f3)\n # Equivalent: tf.switch_case(branch_index, branch_fns={0: f1, 1: f2, 2: f3})\n ```\n\n Args:\n branch_index: An int Tensor specifying which of `branch_fns` should be\n executed.\n branch_fns: A `dict` mapping `int`s to callables, or a `list` of\n (`int`, callable) pairs, or simply a list of callables (in which case the\n index serves as the key). 
Each callable must return a matching structure\n of tensors.\n default: Optional callable that returns a structure of tensors.\n name: A name for this operation (optional).\n\n Returns:\n The tensors returned by the callable identified by `branch_index`, or those\n returned by `default` if no key matches and `default` was provided, or those\n returned by the max-keyed `branch_fn` if no `default` is provided.\n\n Raises:\n TypeError: If `branch_fns` is not a list/dictionary.\n TypeError: If `branch_fns` is a list but does not contain 2-tuples or\n callables.\n TypeError: If `fns[i]` is not callable for any i, or `default` is not\n callable.\n "], ["tf.sysconfig", "description: System configuration library.", true, "System configuration library.\n"], ["tf.Tensor", "description: A tf.Tensor represents a multidimensional array of elements.", false, "A `tf.Tensor` represents a multidimensional array of elements.\n\n All elements are of a single known data type.\n\n When writing a TensorFlow program, the main object that is\n manipulated and passed around is the `tf.Tensor`.\n\n A `tf.Tensor` has the following properties:\n\n * a single data type (float32, int32, or string, for example)\n * a shape\n\n TensorFlow supports eager execution and graph execution. In eager\n execution, operations are evaluated immediately. In graph\n execution, a computational graph is constructed for later\n evaluation.\n\n TensorFlow defaults to eager execution. In the example below, the\n matrix multiplication results are calculated immediately.\n\n >>> # Compute some values using a Tensor\n >>> c = tf.constant([[1.0, 2.0], [3.0, 4.0]])\n >>> d = tf.constant([[1.0, 1.0], [0.0, 1.0]])\n >>> e = tf.matmul(c, d)\n >>> print(e)\n tf.Tensor(\n [[1. 3.]\n [3. 7.]], shape=(2, 2), dtype=float32)\n\n Note that during eager execution, you may discover your `Tensors` are actually\n of type `EagerTensor`. This is an internal detail, but it does give you\n access to a useful function, `numpy`:\n\n >>> type(e)\n \n >>> print(e.numpy())\n [[1. 3.]\n [3. 7.]]\n\n In TensorFlow, `tf.function`s are a common way to define graph execution.\n\n A Tensor's shape (that is, the rank of the Tensor and the size of\n each dimension) may not always be fully known. In `tf.function`\n definitions, the shape may only be partially known.\n\n Most operations produce tensors of fully-known shapes if the shapes of their\n inputs are also fully known, but in some cases it's only possible to find the\n shape of a tensor at execution time.\n\n A number of specialized tensors are available: see `tf.Variable`,\n `tf.constant`, `tf.placeholder`, `tf.sparse.SparseTensor`, and\n `tf.RaggedTensor`.\n\n Caution: when constructing a tensor from a numpy array or pandas dataframe\n the underlying buffer may be re-used:\n\n ```python\n a = np.array([1, 2, 3])\n b = tf.constant(a)\n a[0] = 4\n print(b) # tf.Tensor([4 2 3], shape=(3,), dtype=int64)\n ```\n\n Note: this is an implementation detail that is subject to change and users\n should not rely on this behaviour.\n\n For more on Tensors, see the [guide](https://tensorflow.org/guide/tensor).\n\n "], ["tf.TensorArray", "description: Class wrapping dynamic-sized, per-time-step, write-once Tensor arrays.", false, "Class wrapping dynamic-sized, per-time-step, write-once Tensor arrays.\n\n This class is meant to be used with dynamic iteration primitives such as\n `while_loop` and `map_fn`. 
It supports gradient back-propagation via special\n \"flow\" control flow dependencies.\n\n Example 1: Plain reading and writing.\n\n >>> ta = tf.TensorArray(tf.float32, size=0, dynamic_size=True, clear_after_read=False)\n >>> ta = ta.write(0, 10)\n >>> ta = ta.write(1, 20)\n >>> ta = ta.write(2, 30)\n >>>\n >>> ta.read(0)\n \n >>> ta.read(1)\n \n >>> ta.read(2)\n \n >>> ta.stack()\n \n\n Example 2: Fibonacci sequence algorithm that writes in a loop then returns.\n\n >>> @tf.function\n ... def fibonacci(n):\n ... ta = tf.TensorArray(tf.float32, size=0, dynamic_size=True)\n ... ta = ta.unstack([0., 1.])\n ...\n ... for i in range(2, n):\n ... ta = ta.write(i, ta.read(i - 1) + ta.read(i - 2))\n ...\n ... return ta.stack()\n >>>\n >>> fibonacci(7)\n \n\n Example 3: A simple loop interacting with a `tf.Variable`.\n\n >>> v = tf.Variable(1)\n >>> @tf.function\n ... def f(x):\n ... ta = tf.TensorArray(tf.int32, size=0, dynamic_size=True)\n ... for i in tf.range(x):\n ... v.assign_add(i)\n ... ta = ta.write(i, v)\n ... return ta.stack()\n >>> f(5)\n \n "], ["tf.TensorArraySpec", "description: Type specification for a tf.TensorArray.", false, "Type specification for a `tf.TensorArray`."], ["tf.tensordot", "description: Tensor contraction of a and b along specified axes and outer product.", false, "Tensor contraction of a and b along specified axes and outer product.\n\n Tensordot (also known as tensor contraction) sums the product of elements\n from `a` and `b` over the indices specified by `axes`.\n\n This operation corresponds to `numpy.tensordot(a, b, axes)`.\n\n Example 1: When `a` and `b` are matrices (order 2), the case `axes=1`\n is equivalent to matrix multiplication.\n\n Example 2: When `a` and `b` are matrices (order 2), the case\n `axes = [[1], [0]]` is equivalent to matrix multiplication.\n\n Example 3: When `a` and `b` are matrices (order 2), the case `axes=0` gives\n the outer product, a tensor of order 4.\n\n Example 4: Suppose that \\\\(a_{ijk}\\\\) and \\\\(b_{lmn}\\\\) represent two\n tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor\n \\\\(c_{jklm}\\\\) whose entry\n corresponding to the indices \\\\((j,k,l,m)\\\\) is given by:\n\n \\\\( c_{jklm} = \\sum_i a_{ijk} b_{lmi} \\\\).\n\n In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.\n\n Args:\n a: `Tensor` of type `float32` or `float64`.\n b: `Tensor` with the same type as `a`.\n axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].\n If axes is a scalar, sum over the last N axes of a and the first N axes of\n b in order. If axes is a list or `Tensor` the first and second row contain\n the set of unique integers specifying axes along which the contraction is\n computed, for `a` and `b`, respectively. The number of axes for `a` and\n `b` must be equal. If `axes=0`, computes the outer product between `a` and\n `b`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with the same type as `a`.\n\n Raises:\n ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.\n IndexError: If the values in axes exceed the rank of the corresponding\n tensor.\n "], ["tf.TensorShape", "description: Represents the shape of a Tensor.", false, "Represents the shape of a `Tensor`.\n\n A `TensorShape` represents a possibly-partial shape specification for a\n `Tensor`. It may be one of the following:\n\n * *Fully-known shape:* has a known number of dimensions and a known size\n for each dimension. e.g. 
`TensorShape([16, 256])`\n * *Partially-known shape:* has a known number of dimensions, and an unknown\n size for one or more dimension. e.g. `TensorShape([None, 256])`\n * *Unknown shape:* has an unknown number of dimensions, and an unknown\n size in all dimensions. e.g. `TensorShape(None)`\n\n If a tensor is produced by an operation of type `\"Foo\"`, its shape\n may be inferred if there is a registered shape function for\n `\"Foo\"`. See [Shape\n functions](https://www.tensorflow.org/guide/create_op#shape_functions_in_c)\n for details of shape functions and how to register them. Alternatively,\n you may set the shape explicitly using `tf.Tensor.set_shape`.\n "], ["tf.TensorSpec", "description: Describes a tf.Tensor.", false, "Describes a tf.Tensor.\n\n Metadata for describing the `tf.Tensor` objects accepted or returned\n by some TensorFlow APIs.\n "], ["tf.tensor_scatter_nd_add", "description: Adds sparse updates to an existing tensor according to indices.", false, "Adds sparse `updates` to an existing tensor according to `indices`.\n\n This operation creates a new tensor by adding sparse `updates` to the passed\n in `tensor`.\n This operation is very similar to `tf.compat.v1.scatter_nd_add`, except that the\n updates are added onto an existing tensor (as opposed to a variable). If the\n memory for the existing tensor cannot be re-used, a copy is made and updated.\n\n `indices` is an integer tensor containing indices into a new tensor of shape\n `tensor.shape`. The last dimension of `indices` can be at most the rank of\n `tensor.shape`:\n\n ```\n indices.shape[-1] <= tensor.shape.rank\n ```\n\n The last dimension of `indices` corresponds to indices into elements\n (if `indices.shape[-1] = tensor.shape.rank`) or slices\n (if `indices.shape[-1] < tensor.shape.rank`) along dimension\n `indices.shape[-1]` of `tensor.shape`. `updates` is a tensor with shape\n\n ```\n indices.shape[:-1] + tensor.shape[indices.shape[-1]:]\n ```\n\n The simplest form of `tensor_scatter_nd_add` is to add individual elements to a\n tensor by index. For example, say we want to add 4 elements in a rank-1\n tensor with 8 elements.\n\n In Python, this scatter add operation would look like this:\n\n >>> indices = tf.constant([[4], [3], [1], [7]])\n >>> updates = tf.constant([9, 10, 11, 12])\n >>> tensor = tf.ones([8], dtype=tf.int32)\n >>> updated = tf.tensor_scatter_nd_add(tensor, indices, updates)\n >>> updated\n \n\n We can also, insert entire slices of a higher rank tensor all at once. For\n example, if we wanted to insert two slices in the first dimension of a\n rank-3 tensor with two matrices of new values.\n\n In Python, this scatter add operation would look like this:\n\n >>> indices = tf.constant([[0], [2]])\n >>> updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],\n ... [7, 7, 7, 7], [8, 8, 8, 8]],\n ... [[5, 5, 5, 5], [6, 6, 6, 6],\n ... [7, 7, 7, 7], [8, 8, 8, 8]]])\n >>> tensor = tf.ones([4, 4, 4],dtype=tf.int32)\n >>> updated = tf.tensor_scatter_nd_add(tensor, indices, updates)\n >>> updated\n \n\n Note: on CPU, if an out of bound index is found, an error is returned.\n On GPU, if an out of bound index is found, the index is ignored.\n\n Args:\n tensor: A `Tensor`. Tensor to copy/update.\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Index tensor.\n updates: A `Tensor`. Must have the same type as `tensor`.\n Updates to scatter into output.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `tensor`.\n "], ["tf.tensor_scatter_nd_max", "description: Apply a sparse update to a tensor taking the element-wise maximum.", false, "Apply a sparse update to a tensor taking the element-wise maximum.\n\n Returns a new tensor copied from `tensor` whose values are the element-wise maximum of\n `tensor` and `updates` according to `indices`.\n\n >>> tensor = [0, 0, 0, 0, 0, 0, 0, 0]\n >>> indices = [[1], [4], [5]]\n >>> updates = [1, -1, 1]\n >>> tf.tensor_scatter_nd_max(tensor, indices, updates).numpy()\n array([0, 1, 0, 0, 0, 1, 0, 0], dtype=int32)\n\n Refer to `tf.tensor_scatter_nd_update` for more details.\n\n Args:\n tensor: A `Tensor`. Tensor to update.\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Index tensor.\n updates: A `Tensor`. Must have the same type as `tensor`.\n Updates to scatter into output.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `tensor`.\n "], ["tf.tensor_scatter_nd_min", "description: Apply a sparse update to a tensor taking the element-wise minimum.
", false, "TODO: add doc.\n\n Args:\n tensor: A `Tensor`. Tensor to update.\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Index tensor.\n updates: A `Tensor`. Must have the same type as `tensor`.\n Updates to scatter into output.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `tensor`.\n "], ["tf.tensor_scatter_nd_sub", "description: Subtracts sparse updates from an existing tensor according to indices.", false, "Subtracts sparse `updates` from an existing tensor according to `indices`.\n\n This operation creates a new tensor by subtracting sparse `updates` from the\n passed in `tensor`.\n This operation is very similar to `tf.scatter_nd_sub`, except that the updates\n are subtracted from an existing tensor (as opposed to a variable). If the memory\n for the existing tensor cannot be re-used, a copy is made and updated.\n\n `indices` is an integer tensor containing indices into a new tensor of shape\n `shape`. The last dimension of `indices` can be at most the rank of `shape`:\n\n indices.shape[-1] <= shape.rank\n\n The last dimension of `indices` corresponds to indices into elements\n (if `indices.shape[-1] = shape.rank`) or slices\n (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of\n `shape`. `updates` is a tensor with shape\n\n indices.shape[:-1] + shape[indices.shape[-1]:]\n\n The simplest form of tensor_scatter_sub is to subtract individual elements\n from a tensor by index. For example, say we want to insert 4 scattered elements\n in a rank-1 tensor with 8 elements.\n\n In Python, this scatter subtract operation would look like this:\n\n ```python\n indices = tf.constant([[4], [3], [1], [7]])\n updates = tf.constant([9, 10, 11, 12])\n tensor = tf.ones([8], dtype=tf.int32)\n updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)\n print(updated)\n ```\n\n The resulting tensor would look like this:\n\n [1, -10, 1, -9, -8, 1, 1, -11]\n\n We can also, insert entire slices of a higher rank tensor all at once. For\n example, if we wanted to insert two slices in the first dimension of a\n rank-3 tensor with two matrices of new values.\n\n In Python, this scatter add operation would look like this:\n\n ```python\n indices = tf.constant([[0], [2]])\n updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]],\n [[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]]])\n tensor = tf.ones([4, 4, 4],dtype=tf.int32)\n updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)\n print(updated)\n ```\n\n The resulting tensor would look like this:\n\n [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],\n [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],\n [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],\n [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]\n\n Note that on CPU, if an out of bound index is found, an error is returned.\n On GPU, if an out of bound index is found, the index is ignored.\n\n Args:\n tensor: A `Tensor`. Tensor to copy/update.\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Index tensor.\n updates: A `Tensor`. Must have the same type as `tensor`.\n Updates to scatter into output.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `tensor`.\n "], ["tf.tensor_scatter_nd_update", "description: Scatter updates into an existing tensor according to indices.", false, "Scatter `updates` into an existing tensor according to `indices`.\n\n This operation creates a new tensor by applying sparse `updates` to the\n input `tensor`. This is similar to an index assignment.\n\n ```\n # Not implemented: tensors cannot be updated inplace.\n tensor[indices] = updates\n ```\n\n If an out of bound index is found on CPU, an error is returned.\n\n > **WARNING**: There are some GPU specific semantics for this operation.\n >\n > - If an out of bound index is found, the index is ignored.\n > - The order in which updates are applied is nondeterministic, so the output\n > will be nondeterministic if `indices` contains duplicates.\n\n This operation is very similar to `tf.scatter_nd`, except that the updates are\n scattered onto an existing tensor (as opposed to a zero-tensor). If the memory\n for the existing tensor cannot be re-used, a copy is made and updated.\n\n In general:\n\n * `indices` is an integer tensor - the indices to update in `tensor`.\n * `indices` has **at least two** axes, the last axis is the depth of the\n index vectors.\n * For each index vector in `indices` there is a corresponding entry in\n `updates`.\n * If the length of the index vectors matches the rank of the `tensor`, then\n the index vectors each point to scalars in `tensor` and each update is a\n scalar.\n * If the length of the index vectors is less than the rank of `tensor`, then\n the index vectors each point to slices of `tensor` and the shape of the updates\n must match that slice.\n\n Overall this leads to the following shape constraints:\n\n ```\n assert tf.rank(indices) >= 2\n index_depth = indices.shape[-1]\n batch_shape = indices.shape[:-1]\n assert index_depth <= tf.rank(tensor)\n outer_shape = tensor.shape[:index_depth]\n inner_shape = tensor.shape[index_depth:]\n assert updates.shape == batch_shape + inner_shape\n ```\n\n Typical usage is often much simpler than this general form, and it\n can be better understood starting with simple examples:\n\n ### Scalar updates\n\n The simplest usage inserts scalar elements into a tensor by index.\n In this case, the `index_depth` must equal the rank of the\n input `tensor`, and each column of `indices` is an index into an axis of the\n input `tensor`.\n\n In this simplest case the shape constraints are:\n\n ```\n num_updates, index_depth = indices.shape.as_list()\n assert updates.shape == [num_updates]\n assert index_depth == tf.rank(tensor)\n ```\n\n For example, to insert 4 scattered elements in a rank-1 tensor with\n 8 elements.
\n\n This scatter operation would look like this:\n\n >>> tensor = [0, 0, 0, 0, 0, 0, 0, 0] # tf.rank(tensor) == 1\n >>> indices = [[1], [3], [4], [7]] # num_updates == 4, index_depth == 1\n >>> updates = [9, 10, 11, 12] # num_updates == 4\n >>> print(tf.tensor_scatter_nd_update(tensor, indices, updates))\n tf.Tensor([ 0 9 0 10 11 0 0 12], shape=(8,), dtype=int32)\n\n The length (first axis) of `updates` must equal the length of the `indices`:\n `num_updates`. This is the number of updates being inserted. Each scalar\n update is inserted into `tensor` at the indexed location.\n\n For a higher rank input `tensor`, scalar updates can be inserted by using an\n `index_depth` that matches `tf.rank(tensor)`:\n\n >>> tensor = [[1, 1], [1, 1], [1, 1]] # tf.rank(tensor) == 2\n >>> indices = [[0, 1], [2, 0]] # num_updates == 2, index_depth == 2\n >>> updates = [5, 10] # num_updates == 2\n >>> print(tf.tensor_scatter_nd_update(tensor, indices, updates))\n tf.Tensor(\n [[ 1 5]\n [ 1 1]\n [10 1]], shape=(3, 2), dtype=int32)\n\n ### Slice updates\n\n When the input `tensor` has more than one axis, scatter can be used to update\n entire slices.\n\n In this case it's helpful to think of the input `tensor` as being a two level\n array-of-arrays. The shape of this two level array is split into the\n `outer_shape` and the `inner_shape`.\n\n `indices` indexes into the outer level of the input tensor (`outer_shape`),\n and replaces the sub-array at that location with the corresponding item from\n the `updates` list. The shape of each update is `inner_shape`.\n\n When updating a list of slices the shape constraints are:\n\n ```\n num_updates, index_depth = indices.shape.as_list()\n outer_shape = tensor.shape[:index_depth]\n inner_shape = tensor.shape[index_depth:]\n assert updates.shape == [num_updates, inner_shape]\n ```\n\n For example, to update rows of a `(6, 3)` `tensor`:\n\n >>> tensor = tf.zeros([6, 3], dtype=tf.int32)\n\n Use an index depth of one.\n\n >>> indices = tf.constant([[2], [4]]) # num_updates == 2, index_depth == 1\n >>> num_updates, index_depth = indices.shape.as_list()\n\n The `outer_shape` is `6`, the inner shape is `3`:\n\n >>> outer_shape = tensor.shape[:index_depth]\n >>> inner_shape = tensor.shape[index_depth:]\n\n 2 rows are being indexed so 2 `updates` must be supplied.\n Each update must be shaped to match the `inner_shape`.\n\n >>> # num_updates == 2, inner_shape == 3\n >>> updates = tf.constant([[1, 2, 3],\n ... 
[4, 5, 6]])\n\n Altogether this gives:\n\n >>> tf.tensor_scatter_nd_update(tensor, indices, updates).numpy()\n array([[0, 0, 0],\n [0, 0, 0],\n [1, 2, 3],\n [0, 0, 0],\n [4, 5, 6],\n [0, 0, 0]], dtype=int32)\n\n #### More slice update examples\n\n A tensor representing a batch of uniformly sized video clips naturally has 5\n axes: `[batch_size, time, width, height, channels]`.\n\n For example:\n\n >>> batch_size, time, width, height, channels = 13,11,7,5,3\n >>> video_batch = tf.zeros([batch_size, time, width, height, channels])\n\n To replace a selection of video clips:\n * Use an `index_depth` of 1 (indexing the `outer_shape`: `[batch_size]`)\n * Provide updates each with a shape matching the `inner_shape`:\n `[time, width, height, channels]`.\n\n To replace the first two clips with ones:\n\n >>> indices = [[0],[1]]\n >>> new_clips = tf.ones([2, time, width, height, channels])\n >>> tf.tensor_scatter_nd_update(video_batch, indices, new_clips)\n\n To replace a selection of frames in the videos:\n\n * `indices` must have an `index_depth` of 2 for the `outer_shape`:\n `[batch_size, time]`.\n * `updates` must be shaped like a list of images. Each update must have a\n shape, matching the `inner_shape`: `[width, height, channels]`.\n\n To replace the first frame of the first three video clips:\n\n >>> indices = [[0, 0], [1, 0], [2, 0]] # num_updates=3, index_depth=2\n >>> new_images = tf.ones([\n ... # num_updates=3, inner_shape=(width, height, channels)\n ... 3, width, height, channels])\n >>> tf.tensor_scatter_nd_update(video_batch, indices, new_images)\n\n ### Folded indices\n\n In simple cases it's convenient to think of `indices` and `updates` as\n lists, but this is not a strict requirement. Instead of a flat `num_updates`,\n the `indices` and `updates` can be folded into a `batch_shape`. This\n `batch_shape` is all axes of the `indices`, except for the innermost\n `index_depth` axis.\n\n ```\n index_depth = indices.shape[-1]\n batch_shape = indices.shape[:-1]\n ```\n\n Note: The one exception is that the `batch_shape` cannot be `[]`. You can't\n update a single index by passing indices with shape `[index_depth]`.\n\n `updates` must have a matching `batch_shape` (the axes before `inner_shape`).\n\n ```\n assert updates.shape == batch_shape + inner_shape\n ```\n\n Note: The result is equivalent to flattening the `batch_shape` axes of\n `indices` and `updates`. This generalization just avoids the need\n for reshapes when it is more natural to construct \"folded\" indices and\n updates.\n\n With this generalization the full shape constraints are:\n\n ```\n assert tf.rank(indices) >= 2\n index_depth = indices.shape[-1]\n batch_shape = indices.shape[:-1]\n assert index_depth <= tf.rank(tensor)\n outer_shape = tensor.shape[:index_depth]\n inner_shape = tensor.shape[index_depth:]\n assert updates.shape == batch_shape + inner_shape\n ```\n\n For example, to draw an `X` on a `(5,5)` matrix start with these indices:\n\n >>> tensor = tf.zeros([5,5])\n >>> indices = tf.constant([\n ... [[0,0],\n ... [1,1],\n ... [2,2],\n ... [3,3],\n ... [4,4]],\n ... [[0,4],\n ... [1,3],\n ... [2,2],\n ... [3,1],\n ... [4,0]],\n ... 
])\n >>> indices.shape.as_list() # batch_shape == [2, 5], index_depth == 2\n [2, 5, 2]\n\n Here the `indices` do not have a shape of `[num_updates, index_depth]`, but a\n shape of `batch_shape+[index_depth]`.\n\n Since the `index_depth` is equal to the rank of `tensor`:\n\n * `outer_shape` is `(5,5)`\n * `inner_shape` is `()` - each update is scalar\n * `updates.shape` is `batch_shape + inner_shape == (2, 5) + ()`\n\n >>> updates = [\n ... [1,1,1,1,1],\n ... [1,1,1,1,1],\n ... ]\n\n Putting this together gives:\n\n >>> tf.tensor_scatter_nd_update(tensor, indices, updates).numpy()\n array([[1., 0., 0., 0., 1.],\n [0., 1., 0., 1., 0.],\n [0., 0., 1., 0., 0.],\n [0., 1., 0., 1., 0.],\n [1., 0., 0., 0., 1.]], dtype=float32)\n\n Args:\n tensor: Tensor to copy/update.\n indices: Indices to update.\n updates: Updates to apply at the indices.\n name: Optional name for the operation.\n\n Returns:\n A new tensor with the given shape and updates applied according to the\n indices.\n "], ["tf.test", "description: Testing.", true, "Testing.\n"], ["tf.tile", "description: Constructs a tensor by tiling a given tensor.", false, "Constructs a tensor by tiling a given tensor.\n\n This operation creates a new tensor by replicating `input` `multiples` times.\n The output tensor's i-th dimension has `input.dims(i) * multiples[i]` elements,\n and the values of `input` are replicated `multiples[i]` times along the i-th\n dimension. For example, tiling `[a b c d]` by `[2]` produces\n `[a b c d a b c d]`.\n\n >>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32)\n >>> b = tf.constant([1,2], tf.int32)\n >>> tf.tile(a, b)\n <tf.Tensor: shape=(2, 6), dtype=int32, numpy=\n array([[1, 2, 3, 1, 2, 3],\n [4, 5, 6, 4, 5, 6]], dtype=int32)>\n >>> c = tf.constant([2,1], tf.int32)\n >>> tf.tile(a, c)\n <tf.Tensor: shape=(4, 3), dtype=int32, numpy=\n array([[1, 2, 3],\n [4, 5, 6],\n [1, 2, 3],\n [4, 5, 6]], dtype=int32)>\n >>> d = tf.constant([2,2], tf.int32)\n >>> tf.tile(a, d)\n <tf.Tensor: shape=(4, 6), dtype=int32, numpy=\n array([[1, 2, 3, 1, 2, 3],\n [4, 5, 6, 4, 5, 6],\n [1, 2, 3, 1, 2, 3],\n [4, 5, 6, 4, 5, 6]], dtype=int32)>\n\n Args:\n input: A `Tensor`. 1-D or higher.\n multiples: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n 1-D. Length must be the same as the number of dimensions in `input`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n "], ["tf.timestamp", "description: Provides the time since epoch in seconds.", false, "Provides the time since epoch in seconds.\n\n Returns the timestamp as a `float64` for seconds since the Unix epoch.\n\n Note: the timestamp is computed when the op is executed, not when it is added\n to the graph.\n\n Args:\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float64`.\n "], ["tf.tpu", "description: Ops related to Tensor Processing Units.", true, "Ops related to Tensor Processing Units.\n"], ["tf.train", "description: Support for training models.", true, "Support for training models.\n\nSee the [Training](https://tensorflow.org/api_guides/python/train) guide.\n\n"], ["tf.transpose", "description: Transposes a, where a is a Tensor.", false, "Transposes `a`, where `a` is a Tensor.\n\n Permutes the dimensions according to the value of `perm`.\n\n The returned tensor's dimension `i` will correspond to the input dimension\n `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is the rank\n of the input tensor. 
Hence by default, this operation performs a regular\n matrix transpose on 2-D input Tensors.\n\n If `conjugate` is `True` and `a.dtype` is either `complex64` or `complex128`\n then the values of `a` are conjugated and transposed.\n\n @compatibility(numpy)\n In `numpy` transposes are memory-efficient constant time operations as they\n simply return a new view of the same data with adjusted `strides`.\n\n TensorFlow does not support strides, so `transpose` returns a new tensor with\n the items permuted.\n @end_compatibility\n\n For example:\n\n >>> x = tf.constant([[1, 2, 3], [4, 5, 6]])\n >>> tf.transpose(x)\n <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n array([[1, 4],\n [2, 5],\n [3, 6]], dtype=int32)>\n\n Equivalently, you could call `tf.transpose(x, perm=[1, 0])`.\n\n If `x` is complex, setting `conjugate=True` gives the conjugate transpose:\n\n >>> x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],\n ... [4 + 4j, 5 + 5j, 6 + 6j]])\n >>> tf.transpose(x, conjugate=True)\n <tf.Tensor: shape=(3, 2), dtype=complex128, numpy=\n array([[1.-1.j, 4.-4.j],\n [2.-2.j, 5.-5.j],\n [3.-3.j, 6.-6.j]])>\n\n `perm` is more useful for n-dimensional tensors where n > 2:\n\n >>> x = tf.constant([[[ 1, 2, 3],\n ... [ 4, 5, 6]],\n ... [[ 7, 8, 9],\n ... [10, 11, 12]]])\n\n As above, simply calling `tf.transpose` will default to `perm=[2,1,0]`.\n\n To take the transpose of the matrices in dimension-0 (such as when you are\n transposing matrices where 0 is the batch dimension), you would set\n `perm=[0,2,1]`.\n\n >>> tf.transpose(x, perm=[0, 2, 1])\n <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=\n array([[[ 1, 4],\n [ 2, 5],\n [ 3, 6]],\n [[ 7, 10],\n [ 8, 11],\n [ 9, 12]]], dtype=int32)>\n\n Note: This has a shorthand, `tf.linalg.matrix_transpose`.\n\n Args:\n a: A `Tensor`.\n perm: A permutation of the dimensions of `a`. This should be a vector.\n conjugate: Optional bool. Setting it to `True` is mathematically equivalent\n to `tf.math.conj(tf.transpose(input))`.\n name: A name for the operation (optional).\n\n Returns:\n A transposed `Tensor`.\n "], ["tf.truncatediv", "description: Returns x / y element-wise for integer types.", false, "Returns x / y element-wise for integer types.\n\n Truncation designates that negative numbers will round fractional quantities\n toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different\n from Python semantics. See `FloorDiv` for a division function that matches\n Python semantics.\n\n *NOTE*: `truncatediv` supports broadcasting. More about broadcasting\n [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `uint32`, `uint64`, `int64`, `complex64`, `complex128`.\n y: A `Tensor`. Must have the same type as `x`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n "], ["tf.truncatemod", "description: Returns element-wise remainder of division. This emulates C semantics in that", false, "Returns element-wise remainder of division. This emulates C semantics in that\n\n the result here is consistent with a truncating divide. E.g. `truncate(x / y) *\n y + truncate_mod(x, y) = x`.\n\n *NOTE*: `truncatemod` supports broadcasting. More about broadcasting\n [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\n Args:\n x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `bfloat16`, `half`, `float32`, `float64`.\n y: A `Tensor`. Must have the same type as `x`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `x`.\n "], ["tf.tuple", "description: Groups tensors together.", false, "Groups tensors together.\n\n The returned tensors have the same value as the input tensors, but they\n are computed only after all the input tensors have been computed.\n\n Note: *In TensorFlow 2 with eager and/or Autograph, you should not require\n this method, as ops execute in the expected order thanks to automatic control\n dependencies.* Only use `tf.tuple` when working with v1 `tf.Graph` code.\n\n See also `tf.group` and `tf.control_dependencies`.\n\n Args:\n tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`.\n control_inputs: List of additional ops to finish before returning.\n name: (optional) A name to use as a `name_scope` for the operation.\n\n Returns:\n Same as `tensors`.\n\n Raises:\n ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`.\n TypeError: If `control_inputs` is not a list of `Operation` or `Tensor`\n objects.\n\n "], ["tf.types", "description: Public TensorFlow type definitions.", true, "Public TensorFlow type definitions.\n\nFor details, see\nhttps://github.com/tensorflow/community/blob/master/rfcs/20200211-tf-types.md.\n\n"], ["tf.TypeSpec", "description: Specifies a TensorFlow value type.", false, "Specifies a TensorFlow value type.\n\n A `tf.TypeSpec` provides metadata describing an object accepted or returned\n by TensorFlow APIs. Concrete subclasses, such as `tf.TensorSpec` and\n `tf.RaggedTensorSpec`, are used to describe different value types.\n\n For example, `tf.function`'s `input_signature` argument accepts a list\n (or nested structure) of `TypeSpec`s.\n\n Creating new subclasses of `TypeSpec` (outside of TensorFlow core) is not\n currently supported. In particular, we may make breaking changes to the\n private methods and properties defined by this base class.\n\n Example:\n\n >>> spec = tf.RaggedTensorSpec(shape=[None, None], dtype=tf.int32)\n >>> @tf.function(input_signature=[spec])\n ... def double(x):\n ... return x * 2\n >>> print(double(tf.ragged.constant([[1, 2], [3]])))\n \n "], ["tf.type_spec_from_value", "description: Returns a tf.TypeSpec that represents the given value.", false, "Returns a `tf.TypeSpec` that represents the given `value`.\n\n Examples:\n\n >>> tf.type_spec_from_value(tf.constant([1, 2, 3]))\n TensorSpec(shape=(3,), dtype=tf.int32, name=None)\n >>> tf.type_spec_from_value(np.array([4.0, 5.0], np.float64))\n TensorSpec(shape=(2,), dtype=tf.float64, name=None)\n >>> tf.type_spec_from_value(tf.ragged.constant([[1, 2], [3, 4, 5]]))\n RaggedTensorSpec(TensorShape([2, None]), tf.int32, 1, tf.int64)\n\n >>> example_input = tf.ragged.constant([[1, 2], [3]])\n >>> @tf.function(input_signature=[tf.type_spec_from_value(example_input)])\n ... def f(x):\n ... return tf.reduce_sum(x, axis=1)\n\n Args:\n value: A value that can be accepted or returned by TensorFlow APIs. 
Accepted\n types for `value` include `tf.Tensor`, any value that can be converted to\n `tf.Tensor` using `tf.convert_to_tensor`, and any subclass of\n `CompositeTensor` (such as `tf.RaggedTensor`).\n\n Returns:\n A `TypeSpec` that is compatible with `value`.\n\n Raises:\n TypeError: If a TypeSpec cannot be built for `value`, because its type\n is not supported.\n "], ["tf.UnconnectedGradients", "description: Controls how gradient computation behaves when y does not depend on x.", false, "Controls how gradient computation behaves when y does not depend on x.\n\n The gradient of y with respect to x can be zero in two different ways: there\n could be no differentiable path in the graph connecting x to y (and so we can\n statically prove that the gradient is zero) or it could be that runtime values\n of tensors in a particular execution lead to a gradient of zero (say, if a\n relu unit happens to not be activated). To allow you to distinguish between\n these two cases you can choose what value gets returned for the gradient when\n there is no path in the graph from x to y:\n\n * `NONE`: Indicates that [None] will be returned if there is no path from x\n to y\n * `ZERO`: Indicates that a zero tensor will be returned in the shape of x.\n "], ["tf.unique", "description: Finds unique elements in a 1-D tensor.", false, "Finds unique elements in a 1-D tensor.\n\n This operation returns a tensor `y` containing all of the unique elements of `x`\n sorted in the same order that they occur in `x`; `x` does not need to be sorted.\n This operation also returns a tensor `idx` the same size as `x` that contains\n the index of each value of `x` in the unique output `y`. In other words:\n\n `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`\n\n Examples:\n\n ```\n # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]\n y, idx = unique(x)\n y ==> [1, 2, 4, 7, 8]\n idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]\n ```\n\n ```\n # tensor 'x' is [4, 5, 1, 2, 3, 3, 4, 5]\n y, idx = unique(x)\n y ==> [4, 5, 1, 2, 3]\n idx ==> [0, 1, 2, 3, 4, 4, 0, 1]\n ```\n\n Args:\n x: A `Tensor`. 1-D.\n out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (y, idx).\n\n y: A `Tensor`. Has the same type as `x`.\n idx: A `Tensor` of type `out_idx`.\n "], ["tf.unique_with_counts", "description: Finds unique elements in a 1-D tensor.", false, "Finds unique elements in a 1-D tensor.\n\n This operation returns a tensor `y` containing all of the unique elements of `x`\n sorted in the same order that they occur in `x`. This operation also returns a\n tensor `idx` the same size as `x` that contains the index of each value of `x`\n in the unique output `y`. Finally, it returns a third tensor `count` that\n contains the count of each element of `y` in `x`. In other words:\n\n `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`\n\n For example:\n\n ```\n # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]\n y, idx, count = unique_with_counts(x)\n y ==> [1, 2, 4, 7, 8]\n idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]\n count ==> [2, 1, 3, 1, 2]\n ```\n\n Args:\n x: A `Tensor`. 1-D.\n out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (y, idx, count).\n\n y: A `Tensor`. 
Has the same type as `x`.\n idx: A `Tensor` of type `out_idx`.\n count: A `Tensor` of type `out_idx`.\n "], ["tf.unravel_index", "description: Converts an array of flat indices into a tuple of coordinate arrays.", false, "Converts an array of flat indices into a tuple of coordinate arrays.\n\n Example:\n\n ```\n y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3])\n # 'dims' represent a hypothetical (3, 3) tensor of indices:\n # [[0, 1, *2*],\n # [3, 4, *5*],\n # [6, *7*, 8]]\n # For each entry from 'indices', this operation returns\n # its coordinates (marked with '*'), such as\n # 2 ==> (0, 2)\n # 5 ==> (1, 2)\n # 7 ==> (2, 1)\n y ==> [[0, 1, 2], [2, 2, 1]]\n ```\n\n @compatibility(numpy)\n Equivalent to np.unravel_index\n @end_compatibility\n\n Args:\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n A 0-D or 1-D `int` Tensor whose elements are indices into the\n flattened version of an array of dimensions `dims`.\n dims: A `Tensor`. Must have the same type as `indices`.\n A 1-D `int` Tensor. The shape of the array to use for unraveling\n indices.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `indices`.\n "], ["tf.unstack", "description: Unpacks the given dimension of a rank-R tensor into rank-(R-1) tensors.", false, "Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.\n\n Unpacks tensors from `value` by chipping it along the `axis` dimension.\n\n >>> x = tf.reshape(tf.range(12), (3,4))\n >>>\n >>> p, q, r = tf.unstack(x)\n >>> p.shape.as_list()\n [4]\n\n >>> i, j, k, l = tf.unstack(x, axis=1)\n >>> i.shape.as_list()\n [3]\n\n This is the opposite of stack.\n\n >>> x = tf.stack([i, j, k, l], axis=1)\n\n More generally, if you have a tensor of shape `(A, B, C, D)`:\n\n >>> A, B, C, D = [2, 3, 4, 5]\n >>> t = tf.random.normal(shape=[A, B, C, D])\n\n The number of tensors returned is equal to the length of the target `axis`:\n\n >>> axis = 2\n >>> items = tf.unstack(t, axis=axis)\n >>> len(items) == t.shape[axis]\n True\n\n The shape of each result tensor is equal to the shape of the input tensor,\n with the target `axis` removed.\n\n >>> items[0].shape.as_list() # [A, B, D]\n [2, 3, 5]\n\n The value of each tensor `items[i]` is equal to the slice of `input` across\n `axis` at index `i`:\n\n >>> for i in range(len(items)):\n ... slice = t[:,:,i,:]\n ... assert tf.reduce_all(slice == items[i])\n\n #### Python iterable unpacking\n\n With eager execution you _can_ unstack the 0th axis of a tensor using Python's\n iterable unpacking:\n\n >>> t = tf.constant([1,2,3])\n >>> a,b,c = t\n\n `unstack` is still necessary because iterable unpacking doesn't work in\n a `@tf.function`: Symbolic tensors are not iterable.\n\n You need to use `tf.unstack` here:\n\n >>> @tf.function\n ... def bad(t):\n ... a,b,c = t\n ... return a\n >>>\n >>> bad(t)\n Traceback (most recent call last):\n ...\n OperatorNotAllowedInGraphError: ...\n\n >>> @tf.function\n ... def good(t):\n ... a,b,c = tf.unstack(t)\n ... return a\n >>>\n >>> good(t).numpy()\n 1\n\n #### Unknown shapes\n\n Eager tensors have concrete values, so their shape is always known.\n Inside a `tf.function` the symbolic tensors may have unknown shapes.\n If the length of `axis` is unknown, `tf.unstack` will fail because it cannot\n handle an unknown number of tensors:\n\n >>> @tf.function(input_signature=[tf.TensorSpec([None], tf.float32)])\n ... def bad(t):\n ... tensors = tf.unstack(t)\n ... 
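# fails at trace time: the length of `t` along axis 0 is unknown,\n ... # so the number of result tensors cannot be inferred\n ... 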
return tensors[0]\n >>>\n >>> bad(tf.constant([1,2,3]))\n Traceback (most recent call last):\n ...\n ValueError: Cannot infer argument `num` from shape (None,)\n\n If you know the `axis` length you can pass it as the `num` argument. But this\n must be a constant value.\n\n If you actually need a variable number of tensors in a single `tf.function`\n trace, you will need to use explicit loops and a `tf.TensorArray` instead.\n\n Args:\n value: A rank `R > 0` `Tensor` to be unstacked.\n num: An `int`. The length of the dimension `axis`. Automatically inferred if\n `None` (the default).\n axis: An `int`. The axis to unstack along. Defaults to the first dimension.\n Negative values wrap around, so the valid range is `[-R, R)`.\n name: A name for the operation (optional).\n\n Returns:\n The list of `Tensor` objects unstacked from `value`.\n\n Raises:\n ValueError: If `axis` is out of the range `[-R, R)`.\n ValueError: If `num` is unspecified and cannot be inferred.\n InvalidArgumentError: If `num` does not match the shape of `value`.\n "], ["tf.Variable", "description: See the [variable guide](https://tensorflow.org/guide/variable).", true, "See the [variable guide](https://tensorflow.org/guide/variable).\n\n A variable maintains shared, persistent state manipulated by a program.\n\n The `Variable()` constructor requires an initial value for the variable, which\n can be a `Tensor` of any type and shape. This initial value defines the type\n and shape of the variable. After construction, the type and shape of the\n variable are fixed. The value can be changed using one of the assign methods.\n\n >>> v = tf.Variable(1.)\n >>> v.assign(2.)\n <tf.Variable ... shape=() dtype=float32, numpy=2.0>\n >>> v.assign_add(0.5)\n <tf.Variable ... shape=() dtype=float32, numpy=2.5>\n\n The `shape` argument to `Variable`'s constructor allows you to construct a\n variable with a less defined shape than its `initial_value`:\n\n >>> v = tf.Variable(1., shape=tf.TensorShape(None))\n >>> v.assign([[1.]])\n <tf.Variable ... shape=<unknown> dtype=float32, numpy=array([[1.]], ...)>\n\n Just like any `Tensor`, variables created with `Variable()` can be used as\n inputs to operations. Additionally, all the operators overloaded for the\n `Tensor` class are carried over to variables.\n\n >>> w = tf.Variable([[1.], [2.]])\n >>> x = tf.constant([[3., 4.]])\n >>> tf.matmul(w, x)\n <tf.Tensor: shape=(2, 2), dtype=float32, numpy=\n array([[3., 4.],\n [6., 8.]], dtype=float32)>\n >>> tf.sigmoid(w + x)\n <tf.Tensor: shape=(2, 2), dtype=float32, numpy=\n array([[0.9820138 , 0.9933072 ],\n [0.9933072 , 0.99752736]], dtype=float32)>\n\n When building a machine learning model, it is often convenient to distinguish\n between variables holding trainable model parameters and other variables such\n as a `step` variable used to count training steps. To make this easier, the\n variable constructor supports a `trainable=<bool>`\n parameter. `tf.GradientTape` watches trainable variables by default:\n\n >>> with tf.GradientTape(persistent=True) as tape:\n ... trainable = tf.Variable(1.)\n ... non_trainable = tf.Variable(2., trainable=False)\n ... x1 = trainable * 2.\n ... x2 = non_trainable * 3.\n >>> tape.gradient(x1, trainable)\n <tf.Tensor: shape=(), dtype=float32, numpy=2.0>\n >>> assert tape.gradient(x2, non_trainable) is None # Unwatched\n\n Variables are automatically tracked when assigned to attributes of types\n inheriting from `tf.Module`.\n\n >>> m = tf.Module()\n >>> m.v = tf.Variable([1.])\n >>> m.trainable_variables\n (<tf.Variable 'v:0' shape=(1,) dtype=float32, numpy=array([1.], dtype=float32)>,)\n\n This tracking then allows saving variable values to\n [training checkpoints](https://www.tensorflow.org/guide/checkpoint), or to\n [SavedModels](https://www.tensorflow.org/guide/saved_model) which include\n serialized TensorFlow graphs.\n\n Variables are often captured and manipulated by `tf.function`s. 
This works the\n same way the un-decorated function would have:\n\n >>> v = tf.Variable(0.)\n >>> read_and_decrement = tf.function(lambda: v.assign_sub(0.1))\n >>> read_and_decrement()\n \n >>> read_and_decrement()\n \n\n Variables created inside a `tf.function` must be owned outside the function\n and be created only once:\n\n >>> class M(tf.Module):\n ... @tf.function\n ... def __call__(self, x):\n ... if not hasattr(self, \"v\"): # Or set self.v to None in __init__\n ... self.v = tf.Variable(x)\n ... return self.v * x\n >>> m = M()\n >>> m(2.)\n \n >>> m(3.)\n \n >>> m.v\n \n\n See the `tf.function` documentation for details.\n "], ["tf.VariableAggregation", "description: Indicates how a distributed variable will be aggregated.", false, "Indicates how a distributed variable will be aggregated.\n\n `tf.distribute.Strategy` distributes a model by making multiple copies\n (called \"replicas\") acting data-parallel on different elements of the input\n batch. When performing some variable-update operation, say\n `var.assign_add(x)`, in a model, we need to resolve how to combine the\n different values for `x` computed in the different replicas.\n\n * `NONE`: This is the default, giving an error if you use a\n variable-update operation with multiple replicas.\n * `SUM`: Add the updates across replicas.\n * `MEAN`: Take the arithmetic mean (\"average\") of the updates across replicas.\n * `ONLY_FIRST_REPLICA`: This is for when every replica is performing the same\n update, but we only want to perform the update once. Used, e.g., for the\n global step counter.\n "], ["tf.VariableSynchronization", "description: Indicates when a distributed variable will be synced.", false, "Indicates when a distributed variable will be synced.\n\n * `AUTO`: Indicates that the synchronization will be determined by the current\n `DistributionStrategy` (eg. With `MirroredStrategy` this would be\n `ON_WRITE`).\n * `NONE`: Indicates that there will only be one copy of the variable, so\n there is no need to sync.\n * `ON_WRITE`: Indicates that the variable will be updated across devices\n every time it is written.\n * `ON_READ`: Indicates that the variable will be aggregated across devices\n when it is read (eg. when checkpointing or when evaluating an op that uses\n the variable).\n\n Example:\n >>> temp_grad=[tf.Variable([0.], trainable=False,\n ... synchronization=tf.VariableSynchronization.ON_READ,\n ... aggregation=tf.VariableAggregation.MEAN\n ... )]\n "], ["tf.variable_creator_scope", "description: Scope which defines a variable creation function to be used by variable().", false, "Scope which defines a variable creation function to be used by variable().\n\n variable_creator is expected to be a function with the following signature:\n\n ```\n def variable_creator(next_creator, **kwargs)\n ```\n\n The creator is supposed to eventually call the next_creator to create a\n variable if it does want to create a variable and not call Variable or\n ResourceVariable directly. This helps make creators composable. A creator may\n choose to create multiple variables, return already existing variables, or\n simply register that a variable was created and defer to the next creators in\n line. 
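For illustration, a minimal creator might simply log and delegate (a sketch; `logging_creator` is an illustrative name, not part of the API):\n\n ```\n def logging_creator(next_creator, **kwargs):\n print(\"creating variable:\", kwargs.get(\"name\"))\n return next_creator(**kwargs)\n\n with tf.variable_creator_scope(logging_creator):\n v = tf.Variable(1.0, name=\"my_var\") # prints: creating variable: my_var\n ```\n\n 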
Creators can also modify the keyword arguments seen by the next\n creators.\n\n Custom getters in the variable scope will eventually resolve down to these\n custom creators when they do create variables.\n\n The valid keyword arguments in `kwargs` are:\n\n * initial_value: A `Tensor`, or Python object convertible to a `Tensor`,\n which is the initial value for the Variable. The initial value must have\n a shape specified unless `validate_shape` is set to False. Can also be a\n callable with no argument that returns the initial value when called. In\n that case, `dtype` must be specified. (Note that initializer functions\n from init_ops.py must first be bound to a shape before being used here.)\n * trainable: If `True`, the default, GradientTapes automatically watch\n uses of this Variable.\n * validate_shape: If `False`, allows the variable to be initialized with a\n value of unknown shape. If `True`, the default, the shape of\n `initial_value` must be known.\n * caching_device: Optional device string describing where the Variable\n should be cached for reading. Defaults to the Variable's device.\n If not `None`, caches on another device. Typical use is to cache\n on the device where the Ops using the Variable reside, to deduplicate\n copying through `Switch` and other conditional statements.\n * name: Optional name for the variable. Defaults to `'Variable'` and gets\n uniquified automatically.\n * dtype: If set, `initial_value` will be converted to the given type.\n If `None`, either the datatype will be kept (if `initial_value` is\n a Tensor), or `convert_to_tensor` will decide.\n * constraint: A constraint function to be applied to the variable after\n updates by some algorithms.\n * synchronization: Indicates when a distributed variable will be\n synchronized. Accepted values are constants defined in the class\n `tf.VariableSynchronization`. By default the synchronization is set to\n `AUTO` and the current `DistributionStrategy` chooses\n when to synchronize.\n * aggregation: Indicates how a distributed variable will be aggregated.\n Accepted values are constants defined in the class\n `tf.VariableAggregation`.\n\n This set may grow over time, so it's important that the signature of creators is as\n mentioned above.\n\n Args:\n variable_creator: The passed creator.\n\n Yields:\n A scope in which the creator is active.\n "], ["tf.vectorized_map", "description: Parallel map on the list of tensors unpacked from elems on dimension 0.", false, "Parallel map on the list of tensors unpacked from `elems` on dimension 0.\n\n This method works similarly to `tf.map_fn` but is optimized to run much faster,\n possibly with a much larger memory footprint. The speedups are obtained by\n vectorization (see [Auto-Vectorizing TensorFlow Graphs: Jacobians,\n Auto-Batching and Beyond](https://arxiv.org/pdf/1903.04243.pdf)). The idea\n behind vectorization is to semantically launch all the invocations of `fn` in\n parallel and fuse corresponding operations across all these invocations. This\n fusion is done statically at graph generation time and the generated code is\n often similar in performance to a manually fused version.\n\n Because `tf.vectorized_map` fully parallelizes the batch, this method will\n generally be significantly faster than using `tf.map_fn`, especially in eager\n mode. However, this is an experimental feature and currently has a lot of\n limitations:\n - There should be no data dependency between the different semantic\n invocations of `fn`, i.e. 
it should be safe to map the elements of the\n inputs in any order.\n - Stateful kernels may mostly not be supported since these often imply a\n data dependency. We do support a limited set of such stateful kernels\n though (like RandomFoo, Variable operations like reads, etc).\n - `fn` has limited support for control flow operations.\n - `fn` should return nested structure of Tensors or Operations. However\n if an Operation is returned, it should have zero outputs.\n - The shape and dtype of any intermediate or output tensors in the\n computation of `fn` should not depend on the input to `fn`.\n\n Examples:\n ```python\n def outer_product(a):\n return tf.tensordot(a, a, 0)\n\n batch_size = 100\n a = tf.ones((batch_size, 32, 32))\n c = tf.vectorized_map(outer_product, a)\n assert c.shape == (batch_size, 32, 32, 32, 32)\n ```\n\n ```python\n # Computing per-example gradients\n\n batch_size = 10\n num_features = 32\n layer = tf.keras.layers.Dense(1)\n\n def model_fn(arg):\n with tf.GradientTape() as g:\n inp, label = arg\n inp = tf.expand_dims(inp, 0)\n label = tf.expand_dims(label, 0)\n prediction = layer(inp)\n loss = tf.nn.l2_loss(label - prediction)\n return g.gradient(loss, (layer.kernel, layer.bias))\n\n inputs = tf.random.uniform([batch_size, num_features])\n labels = tf.random.uniform([batch_size, 1])\n per_example_gradients = tf.vectorized_map(model_fn, (inputs, labels))\n assert per_example_gradients[0].shape == (batch_size, num_features, 1)\n assert per_example_gradients[1].shape == (batch_size, 1)\n ```\n\n Args:\n fn: The callable to be performed. It accepts one argument, which will have\n the same (possibly nested) structure as `elems`, and returns a possibly\n nested structure of Tensors and Operations, which may be different than\n the structure of `elems`.\n elems: A tensor or (possibly nested) sequence of tensors, each of which will\n be unpacked along their first dimension. The nested sequence of the\n resulting slices will be mapped over by `fn`. The first dimensions of all\n elements must broadcast to a consistent value; equivalently, each\n element tensor must have first dimension of either `B` or `1`, for some\n common batch size `B >= 1`.\n fallback_to_while_loop: If true, on failing to vectorize an operation,\n the unsupported op is wrapped in a tf.while_loop to execute the map\n iterations. Note that this fallback only happens for unsupported ops and\n other parts of `fn` are still vectorized. If false, on encountering an\n unsupported op, a ValueError is thrown. Note that the fallbacks can result\n in slowdowns since vectorization often yields speedup of one to two orders\n of magnitude.\n\n Returns:\n A tensor or (possibly nested) sequence of tensors. Each tensor packs the\n results of applying fn to tensors unpacked from elems along the first\n dimension, from first to last.\n\n Although they are less common as user-visible inputs and outputs, note that\n tensors of type `tf.variant` which represent tensor lists (for example from\n `tf.raw_ops.TensorListFromTensor`) are vectorized by stacking the list\n contents rather than the variant itself, and so the container tensor will\n have a scalar shape when returned rather than the usual stacked shape. 
This\n improves the performance of control flow gradient vectorization.\n\n Raises:\n ValueError: If vectorization fails and fallback_to_while_loop is False.\n "], ["tf.version", "description: Public API for tf.version namespace.", false, "Public API for tf.version namespace.\n"], ["tf.where", "description: Returns the indices of non-zero elements, or multiplexes x and y.", false, "Returns the indices of non-zero elements, or multiplexes `x` and `y`.\n\n This operation has two modes:\n\n 1. **Return the indices of non-zero elements** - When only\n `condition` is provided the result is an `int64` tensor where each row is\n the index of a non-zero element of `condition`. The result's shape\n is `[tf.math.count_nonzero(condition), tf.rank(condition)]`.\n 2. **Multiplex `x` and `y`** - When both `x` and `y` are provided the\n result has the shape of `x`, `y`, and `condition` broadcast together. The\n result is taken from `x` where `condition` is non-zero\n or `y` where `condition` is zero.\n\n #### 1. Return the indices of non-zero elements\n\n Note: In this mode `condition` can have a dtype of `bool` or any numeric\n dtype.\n\n If `x` and `y` are not provided (both are None):\n\n `tf.where` will return the indices of `condition` that are non-zero,\n in the form of a 2-D tensor with shape `[n, d]`, where `n` is the number of\n non-zero elements in `condition` (`tf.count_nonzero(condition)`), and `d` is\n the number of axes of `condition` (`tf.rank(condition)`).\n\n Indices are output in row-major order. The `condition` can have a `dtype` of\n `tf.bool`, or any numeric `dtype`.\n\n Here `condition` is a 1-axis `bool` tensor with 2 `True` values. The result\n has a shape of `[2,1]`\n\n >>> tf.where([True, False, False, True]).numpy()\n array([[0],\n [3]])\n\n Here `condition` is a 2-axis integer tensor, with 3 non-zero values. The\n result has a shape of `[3, 2]`.\n\n >>> tf.where([[1, 0, 0], [1, 0, 1]]).numpy()\n array([[0, 0],\n [1, 0],\n [1, 2]])\n\n Here `condition` is a 3-axis float tensor, with 5 non-zero values. The output\n shape is `[5, 3]`.\n\n >>> float_tensor = [[[0.1, 0], [0, 2.2], [3.5, 1e6]],\n ... [[0, 0], [0, 0], [99, 0]]]\n >>> tf.where(float_tensor).numpy()\n array([[0, 0, 0],\n [0, 1, 1],\n [0, 2, 0],\n [0, 2, 1],\n [1, 2, 0]])\n\n These indices are the same that `tf.sparse.SparseTensor` would use to\n represent the condition tensor:\n\n >>> sparse = tf.sparse.from_dense(float_tensor)\n >>> sparse.indices.numpy()\n array([[0, 0, 0],\n [0, 1, 1],\n [0, 2, 0],\n [0, 2, 1],\n [1, 2, 0]])\n\n A complex number is considered non-zero if either the real or imaginary\n component is non-zero:\n\n >>> tf.where([complex(0.), complex(1.), 0+1j, 1+1j]).numpy()\n array([[1],\n [2],\n [3]])\n\n #### 2. Multiplex `x` and `y`\n\n Note: In this mode `condition` must have a dtype of `bool`.\n\n If `x` and `y` are also provided (both have non-None values) the `condition`\n tensor acts as a mask that chooses whether the corresponding\n element / row in the output should be taken from `x` (if the element in\n `condition` is `True`) or `y` (if it is `False`).\n\n The shape of the result is formed by\n [broadcasting](https://docs.scipy.org/doc/numpy/reference/ufuncs.html)\n together the shapes of `condition`, `x`, and `y`.\n\n When all three inputs have the same size, each is handled element-wise.\n\n >>> tf.where([True, False, False, True],\n ... [1, 2, 3, 4],\n ... [100, 200, 300, 400]).numpy()\n array([ 1, 200, 300, 4], dtype=int32)\n\n There are two main rules for broadcasting:\n\n 1. 
If a tensor has fewer axes than the others, length-1 axes are added to the\n left of the shape.\n 2. Axes with length-1 are stretched to match the corresponding axes of the other\n tensors.\n\n A length-1 vector is stretched to match the other vectors:\n\n >>> tf.where([True, False, False, True], [1, 2, 3, 4], [100]).numpy()\n array([ 1, 100, 100, 4], dtype=int32)\n\n A scalar is expanded to match the other arguments:\n\n >>> tf.where([[True, False], [False, True]], [[1, 2], [3, 4]], 100).numpy()\n array([[ 1, 100], [100, 4]], dtype=int32)\n >>> tf.where([[True, False], [False, True]], 1, 100).numpy()\n array([[ 1, 100], [100, 1]], dtype=int32)\n\n A scalar `condition` returns the complete `x` or `y` tensor, with\n broadcasting applied.\n\n >>> tf.where(True, [1, 2, 3, 4], 100).numpy()\n array([1, 2, 3, 4], dtype=int32)\n >>> tf.where(False, [1, 2, 3, 4], 100).numpy()\n array([100, 100, 100, 100], dtype=int32)\n\n For a non-trivial example of broadcasting, here `condition` has a shape of\n `[3]`, `x` has a shape of `[3,3]`, and `y` has a shape of `[3,1]`.\n Broadcasting first expands the shape of `condition` to `[1,3]`. The final\n broadcast shape is `[3,3]`. `condition` will select columns from `x` and `y`.\n Since `y` only has one column, all columns from `y` will be identical.\n\n >>> tf.where([True, False, True],\n ... x=[[1, 2, 3],\n ... [4, 5, 6],\n ... [7, 8, 9]],\n ... y=[[100],\n ... [200],\n ... [300]]\n ... ).numpy()\n array([[ 1, 100, 3],\n [ 4, 200, 6],\n [ 7, 300, 9]], dtype=int32)\n\n Note that if the gradient of either branch of the `tf.where` generates\n a `NaN`, then the gradient of the entire `tf.where` will be `NaN`. This is\n because the gradient calculation for `tf.where` combines the two branches, for\n performance reasons.\n\n A workaround is to use an inner `tf.where` to ensure the function has\n no asymptote, and to avoid computing a value whose gradient is `NaN` by\n replacing dangerous inputs with safe inputs.\n\n Instead of this,\n\n >>> x = tf.constant(0., dtype=tf.float32)\n >>> with tf.GradientTape() as tape:\n ... tape.watch(x)\n ... y = tf.where(x < 1., 0., 1. / x)\n >>> print(tape.gradient(y, x))\n tf.Tensor(nan, shape=(), dtype=float32)\n\n Although the `1. / x` values are never used, their gradient is a `NaN` when\n `x = 0`. Instead, we should guard that with another `tf.where`\n\n >>> x = tf.constant(0., dtype=tf.float32)\n >>> with tf.GradientTape() as tape:\n ... tape.watch(x)\n ... safe_x = tf.where(tf.equal(x, 0.), 1., x)\n ... y = tf.where(x < 1., 0., 1. / safe_x)\n >>> print(tape.gradient(y, x))\n tf.Tensor(0.0, shape=(), dtype=float32)\n\n See also:\n\n * `tf.sparse` - The indices returned by the first form of `tf.where` can be\n useful in `tf.sparse.SparseTensor` objects.\n * `tf.gather_nd`, `tf.scatter_nd`, and related ops - Given the\n list of indices returned from `tf.where` the `scatter` and `gather` family\n of ops can be used to fetch values or insert values at those indices.\n * `tf.strings.length` - `tf.string` is not an allowed dtype for the\n `condition`. Use the string length instead.\n\n Args:\n condition: A `tf.Tensor` of dtype bool, or any numeric dtype. 
`condition`\n must have dtype `bool` when `x` and `y` are provided.\n x: If provided, a Tensor which is of the same type as `y`, and has a shape\n broadcastable with `condition` and `y`.\n y: If provided, a Tensor which is of the same type as `x`, and has a shape\n broadcastable with `condition` and `x`.\n name: A name of the operation (optional).\n\n Returns:\n If `x` and `y` are provided:\n A `Tensor` with the same type as `x` and `y`, and shape that\n is broadcast from `condition`, `x`, and `y`.\n Otherwise, a `Tensor` with shape `[tf.math.count_nonzero(condition),\n tf.rank(condition)]`.\n\n Raises:\n ValueError: When exactly one of `x` or `y` is non-None, or the shapes\n are not all broadcastable.\n "], ["tf.while_loop", "description: Repeat body while the condition cond is true. (deprecated argument values)", false, "Repeat `body` while the condition `cond` is true. (deprecated argument values)\n\nDeprecated: SOME ARGUMENT VALUES ARE DEPRECATED: `(back_prop=False)`. They will be removed in a future version.\nInstructions for updating:\nback_prop=False is deprecated. Consider using tf.stop_gradient instead.\nInstead of:\nresults = tf.while_loop(c, b, vars, back_prop=False)\nUse:\nresults = tf.nest.map_structure(tf.stop_gradient, tf.while_loop(c, b, vars))\n\n`cond` is a callable returning a boolean scalar tensor. `body` is a callable\nreturning a (possibly nested) tuple, namedtuple or list of tensors of the same\narity (length and structure) and types as `loop_vars`. `loop_vars` is a\n(possibly nested) tuple, namedtuple or list of tensors that is passed to both\n`cond` and `body`. `cond` and `body` both take as many arguments as there are\n`loop_vars`.\n\nIn addition to regular Tensors or IndexedSlices, the body may accept and\nreturn TensorArray objects. The flows of the TensorArray objects will\nbe appropriately forwarded between loops and during gradient calculations.\n\nNote that `while_loop` calls `cond` and `body` *exactly once* (inside the\ncall to `while_loop`, and not at all during `Session.run()`). `while_loop`\nstitches together the graph fragments created during the `cond` and `body`\ncalls with some additional graph nodes to create the graph flow that\nrepeats `body` until `cond` returns false.\n\nFor correctness, `tf.while_loop()` strictly enforces shape invariants for\nthe loop variables. A shape invariant is a (possibly partial) shape that\nis unchanged across the iterations of the loop. An error will be raised\nif the shape of a loop variable after an iteration is determined to be more\ngeneral than or incompatible with its shape invariant. For example, a shape\nof [11, None] is more general than a shape of [11, 17], and [11, 21] is not\ncompatible with [11, 17]. By default (if the argument `shape_invariants` is\nnot specified), it is assumed that the initial shape of each tensor in\n`loop_vars` is the same in every iteration. The `shape_invariants` argument\nallows the caller to specify a less specific shape invariant for each loop\nvariable, which is needed if the shape varies between iterations. The\n`tf.Tensor.set_shape`\nfunction may also be used in the `body` function to indicate that\nthe output loop variable has a particular shape. The shape invariants for\nSparseTensor and IndexedSlices are treated specially as follows:\n\na) If a loop variable is a SparseTensor, the shape invariant must be\nTensorShape([r]) where r is the rank of the dense tensor represented\nby the sparse tensor. 
It means the shapes of the three tensors of the\nSparseTensor are ([None], [None, r], [r]). NOTE: The shape invariant here\nis the shape of the SparseTensor.dense_shape property. It must be the shape of\na vector.\n\nb) If a loop variable is an IndexedSlices, the shape invariant must be\na shape invariant of the values tensor of the IndexedSlices. It means\nthe shapes of the three tensors of the IndexedSlices are (shape, [shape[0]],\n[shape.ndims]).\n\n`while_loop` implements non-strict semantics, enabling multiple iterations\nto run in parallel. The maximum number of parallel iterations can be\ncontrolled by `parallel_iterations`, which gives users some control over\nmemory consumption and execution order. For correct programs, `while_loop`\nshould return the same result for any parallel_iterations > 0.\n\nFor training, TensorFlow stores the tensors that are produced in the\nforward inference and are needed in back propagation. These tensors are a\nmain source of memory consumption and often cause OOM errors when training\non GPUs. When the flag swap_memory is true, we swap out these tensors from\nGPU to CPU. This for example allows us to train RNN models with very long\nsequences and large batches.\n\nArgs:\n cond: A callable that represents the termination condition of the loop.\n body: A callable that represents the loop body.\n loop_vars: A (possibly nested) tuple, namedtuple or list of numpy array,\n `Tensor`, and `TensorArray` objects.\n shape_invariants: The shape invariants for the loop variables.\n parallel_iterations: The number of iterations allowed to run in parallel. It\n must be a positive integer.\n back_prop: (optional) Deprecated. False disables support for back\n propagation. Prefer using `tf.stop_gradient` instead.\n swap_memory: Whether GPU-CPU memory swap is enabled for this loop.\n maximum_iterations: Optional maximum number of iterations of the while loop\n to run. If provided, the `cond` output is AND-ed with an additional\n condition ensuring the number of iterations executed is no greater than\n `maximum_iterations`.\n name: Optional name prefix for the returned tensors.\n\nReturns:\n The output tensors for the loop variables after the loop. The return value\n has the same structure as `loop_vars`.\n\nRaises:\n TypeError: if `cond` or `body` is not callable.\n ValueError: if `loop_vars` is empty.\n\nExample:\n\n```python\ni = tf.constant(0)\nc = lambda i: tf.less(i, 10)\nb = lambda i: (tf.add(i, 1), )\nr = tf.while_loop(c, b, [i])\n```\n\nExample with nesting and a namedtuple:\n\n```python\nimport collections\nPair = collections.namedtuple('Pair', 'j, k')\nijk_0 = (tf.constant(0), Pair(tf.constant(1), tf.constant(2)))\nc = lambda i, p: i < 10\nb = lambda i, p: (i + 1, Pair((p.j + p.k), (p.j - p.k)))\nijk_final = tf.while_loop(c, b, ijk_0)\n```\n\nExample using shape_invariants:\n\n```python\ni0 = tf.constant(0)\nm0 = tf.ones([2, 2])\nc = lambda i, m: i < 10\nb = lambda i, m: [i+1, tf.concat([m, m], axis=0)]\ntf.while_loop(\n c, b, loop_vars=[i0, m0],\n shape_invariants=[i0.get_shape(), tf.TensorShape([None, 2])])\n```\n\nExample which demonstrates non-strict semantics: In the following\nexample, the final value of the counter `i` does not depend on `x`. So\nthe `while_loop` can increment the counter parallel to updates of `x`.\nHowever, because the loop counter at one loop iteration depends\non the value at the previous iteration, the loop counter itself cannot\nbe incremented in parallel. 
Hence if we just want the final value of the\ncounter (which we print on the line `print(sess.run(i))`), then\n`x` will never be incremented, but the counter will be updated on a\nsingle thread. Conversely, if we want the value of the output (which we\nprint on the line `print(sess.run(out).shape)`), then the counter may be\nincremented on its own thread, while `x` can be incremented in\nparallel on a separate thread. In the extreme case, it is conceivable\nthat the thread incrementing the counter runs until completion before\n`x` is incremented even a single time. The only thing that can never\nhappen is that the thread updating `x` gets ahead of the\ncounter thread, because the thread incrementing `x` depends on the value\nof the counter.\n\n```python\nimport tensorflow as tf\n\nn = 10000\nx = tf.constant(list(range(n)))\nc = lambda i, x: i < n\nb = lambda i, x: (tf.compat.v1.Print(i + 1, [i]), tf.compat.v1.Print(x + 1,\n[i], \"x:\"))\ni, out = tf.while_loop(c, b, (0, x))\nwith tf.compat.v1.Session() as sess:\n print(sess.run(i)) # prints [0] ... [9999]\n\n # The following line may increment the counter and x in parallel.\n # The counter thread may get ahead of the other thread, but not the\n # other way around. So you may see things like\n # [9996] x:[9987]\n # meaning that the counter thread is on iteration 9996,\n # while the other thread is on iteration 9987\n print(sess.run(out).shape)\n```"], ["tf.xla", "description: Public API for tf.xla namespace.", true, "Public API for tf.xla namespace.\n"], ["tf.zeros", "description: Creates a tensor with all elements set to zero.", false, "Creates a tensor with all elements set to zero.\n\n See also `tf.zeros_like`, `tf.ones`, `tf.fill`, `tf.eye`.\n\n This operation returns a tensor of type `dtype` with shape `shape` and\n all elements set to zero.\n\n >>> tf.zeros([3, 4], tf.int32)\n <tf.Tensor: shape=(3, 4), dtype=int32, numpy=\n array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]], dtype=int32)>\n\n Args:\n shape: A `list` of integers, a `tuple` of integers, or\n a 1-D `Tensor` of type `int32`.\n dtype: The DType of an element in the resulting `Tensor`.\n name: Optional string. A name for the operation.\n\n Returns:\n A `Tensor` with all elements set to zero.\n "], ["tf.zeros_initializer", "description: Initializer that generates tensors initialized to 0.", false, "Initializer that generates tensors initialized to 0.\n\n Initializers allow you to pre-specify an initialization strategy, encoded in\n the Initializer object, without knowing the shape and dtype of the variable\n being initialized.\n\n Examples:\n\n >>> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.zeros_initializer())\n >>> v1\n <tf.Variable 'Variable:0' shape=(3,) dtype=float32, numpy=array([0., 0., 0.], dtype=float32)>\n >>> v2\n <tf.Variable 'Variable:0' shape=(3, 3) dtype=float32, numpy=\n array([[0., 0., 0.],\n [0., 0., 0.],\n [0., 0., 0.]], dtype=float32)>\n >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))\n (<tf.Variable 'Variable:0' shape=(4,) dtype=float32, numpy=...>, <tf.Variable 'Variable:0' shape=(4, 4) dtype=float32, numpy=...>)\n "], ["tf.zeros_like", "description: Creates a tensor with all elements set to zero.", false, "Creates a tensor with all elements set to zero.\n\n See also `tf.zeros`.\n\n Given a single tensor or array-like object (`input`), this operation returns\n a tensor of the same type and shape as `input` with all elements set to zero.\n Optionally, you can use `dtype` to specify a new type for the returned tensor.\n\n Examples:\n\n >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])\n >>> tf.zeros_like(tensor)\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[0, 0, 0],\n [0, 0, 0]], dtype=int32)>\n\n >>> tf.zeros_like(tensor, dtype=tf.float32)\n <tf.Tensor: shape=(2, 3), dtype=float32, numpy=\n array([[0., 0., 0.],\n [0., 0., 0.]], dtype=float32)>\n\n >>> tf.zeros_like([[1, 2, 3], [4, 5, 6]])\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[0, 0, 0],\n [0, 0, 0]], dtype=int32)>\n\n Args:\n input: A `Tensor` or array-like object.\n dtype: A type for the returned `Tensor`. 
Must be `float16`, `float32`,\n `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,\n `complex64`, `complex128`, `bool` or `string` (optional).\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to zero.\n "]]
\ No newline at end of file
+[{"name": "audio", "path": "./tf/audio.md", "desc": " modulePublic API for tf.audio namespace.", "type": "Modules", "docs": "Public API for tf.audio namespace.\n"}, {"name": "autodiff", "path": "./tf/autodiff.md", "desc": " modulePublic API for tf.autodiff namespace.", "type": "Modules", "docs": "Public API for tf.autodiff namespace.\n"}, {"name": "autograph", "path": "./tf/autograph.md", "desc": " moduleConversion of eager-style Python into TensorFlow graph code.", "type": "Modules", "docs": "Conversion of eager-style Python into TensorFlow graph code.\n\nNOTE: In TensorFlow 2.0, AutoGraph is automatically applied when using\n`tf.function`. This module contains lower-level APIs for advanced use.\n\nAutoGraph transforms a subset of Python which operates on TensorFlow objects\ninto equivalent TensorFlow graph code. When executing the graph, it has the same\neffect as if you ran the original code in eager mode.\nPython code which doesn't operate on TensorFlow objects remains functionally\nunchanged, but keep in mind that `tf.function` only executes such code at trace\ntime, and generally will not be consistent with eager execution.\n\nFor more information, see the\n[AutoGraph reference documentation](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/index.md),\nand the [tf.function guide](https://www.tensorflow.org/guide/function#autograph_transformations).\n\n"}, {"name": "bitwise", "path": "./tf/bitwise.md", "desc": " moduleOperations for manipulating the binary representations of integers.", "type": "Modules", "docs": "Operations for manipulating the binary representations of integers.\n"}, {"name": "compat", "path": "./tf/compat.md", "desc": " moduleCompatibility functions.", "type": "Modules", "docs": "Compatibility functions.\n\nThe `tf.compat` module contains two sets of compatibility functions.\n\n## TensorFlow 1.x and 2.x APIs\n\nThe `compat.v1` and `compat.v2` submodules provide a complete copy of both the\n`v1` and `v2` APIs for backwards and forwards compatibility across TensorFlow\nversions 1.x and 2.x. 
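As an illustrative sketch (not part of the original module docs), TF 1.x graph-mode idioms remain reachable from a TF 2.x install through `compat.v1`:

```python
import tensorflow as tf

# Build and run a TF 1.x-style graph via the compat.v1 submodule.
g = tf.Graph()
with g.as_default():
    x = tf.compat.v1.placeholder(tf.float32, shape=[None, 3])
    y = tf.reduce_sum(x, axis=1)

with tf.compat.v1.Session(graph=g) as sess:
    print(sess.run(y, feed_dict={x: [[1., 2., 3.]]}))  # [6.]
```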
See the\n[migration guide](https://www.tensorflow.org/guide/migrate) for details.\n\n## Utilities for writing compatible code\n\nAside from the `compat.v1` and `compat.v2` submodules, `tf.compat` also contains\na set of helper functions for writing code that works in both:\n\n* TensorFlow 1.x and 2.x\n* Python 2 and 3\n\n\n## Type collections\n\nThe compatibility module also provides the following aliases for common\nsets of python types:\n\n* `bytes_or_text_types`\n* `complex_types`\n* `integral_types`\n* `real_types`\n\n"}, {"name": "config", "path": "./tf/config.md", "desc": " modulePublic API for tf.config namespace.", "type": "Modules", "docs": "Public API for tf.config namespace.\n"}, {"name": "data", "path": "./tf/data.md", "desc": " moduletf.data.Dataset API for input pipelines.", "type": "Modules", "docs": "`tf.data.Dataset` API for input pipelines.\n\nSee [Importing Data](https://tensorflow.org/guide/data) for an overview.\n\n"}, {"name": "debugging", "path": "./tf/debugging.md", "desc": " modulePublic API for tf.debugging namespace.", "type": "Modules", "docs": "Public API for tf.debugging namespace.\n"}, {"name": "distribute", "path": "./tf/distribute.md", "desc": " moduleLibrary for running a computation across multiple devices.", "type": "Modules", "docs": "Library for running a computation across multiple devices.\n\nThe intent of this library is that you can write an algorithm in a stylized way\nand it will be usable with a variety of different `tf.distribute.Strategy`\nimplementations. Each descendant will implement a different strategy for\ndistributing the algorithm across multiple devices/machines. Furthermore, these\nchanges can be hidden inside the specific layers and other library classes that\nneed special treatment to run in a distributed setting, so that most users'\nmodel definition code can run unchanged. The `tf.distribute.Strategy` API works\nthe same way with eager and graph execution.\n\n*Guides*\n\n* [TensorFlow v2.x](https://www.tensorflow.org/guide/distributed_training)\n* [TensorFlow v1.x](https://github.com/tensorflow/docs/blob/master/site/en/r1/guide/distribute_strategy.ipynb)\n\n*Tutorials*\n\n* [Distributed Training Tutorials](https://www.tensorflow.org/tutorials/distribute/)\n\n The tutorials cover how to use `tf.distribute.Strategy` to do distributed\n training with native Keras APIs, custom training loops,\n and Estimator APIs. They also cover how to save/load model when using\n `tf.distribute.Strategy`.\n\n*Glossary*\n\n* _Data parallelism_ is where we run multiple copies of the model\n on different slices of the input data. This is in contrast to\n _model parallelism_ where we divide up a single copy of a model\n across multiple devices.\n Note: we only support data parallelism for now, but\n hope to add support for model parallelism in the future.\n* A _device_ is a CPU or accelerator (e.g. GPUs, TPUs) on some machine that\n TensorFlow can run operations on (see e.g. `tf.device`). You may have multiple\n devices on a single machine, or be connected to devices on multiple\n machines. Devices used to run computations are called _worker devices_.\n Devices used to store variables are _parameter devices_. For some strategies,\n such as `tf.distribute.MirroredStrategy`, the worker and parameter devices\n will be the same (see mirrored variables below). For others they will be\n different. 
For example, `tf.distribute.experimental.CentralStorageStrategy`\n puts the variables on a single device (which may be a worker device or may be\n the CPU), and `tf.distribute.experimental.ParameterServerStrategy` puts the\n variables on separate machines called _parameter servers_ (see below).\n* A _replica_ is one copy of the model, running on one slice of the\n input data. Right now each replica is executed on its own\n worker device, but once we add support for model parallelism\n a replica may span multiple worker devices.\n* A _host_ is the CPU device on a machine with worker devices, typically\n used for running input pipelines.\n* A _worker_ is defined to be the physical machine(s) containing the physical\n devices (e.g. GPUs, TPUs) on which the replicated computation is executed. A\n worker may contain one or more replicas, but contains at least one\n replica. Typically one worker will correspond to one machine, but in the case\n of very large models with model parallelism, one worker may span multiple\n machines. We typically run one input pipeline per worker, feeding all the\n replicas on that worker.\n* _Synchronous_, or more commonly _sync_, training is where the updates from\n each replica are aggregated together before updating the model variables. This\n is in contrast to _asynchronous_, or _async_ training, where each replica\n updates the model variables independently. You may also have replicas\n partitioned into groups which are in sync within each group but async between\n groups.\n* _Parameter servers_: These are machines that hold a single copy of\n parameters/variables, used by some strategies (right now just\n `tf.distribute.experimental.ParameterServerStrategy`). All replicas that want\n to operate on a variable retrieve it at the beginning of a step and send an\n update to be applied at the end of the step. These can in principle support\n either sync or async training, but right now we only have support for async\n training with parameter servers. Compare to\n `tf.distribute.experimental.CentralStorageStrategy`, which puts all variables\n on a single device on the same machine (and does sync training), and\n `tf.distribute.MirroredStrategy`, which mirrors variables to multiple devices\n (see below).\n\n* _Replica context_ vs. _Cross-replica context_ vs _Update context_\n\n A _replica context_ applies\n when you execute the computation function that was called with `strategy.run`.\n Conceptually, you're in replica context when executing the computation\n function that is being replicated.\n\n An _update context_ is entered in a `tf.distribute.StrategyExtended.update`\n call.\n\n A _cross-replica context_ is entered when you enter a `strategy.scope`. This\n is useful for calling `tf.distribute.Strategy` methods which operate across\n the replicas (like `reduce_to()`). By default you start in a _replica context_\n (the \"default single _replica context_\") and then some methods can switch you\n back and forth.\n\n* _Distributed value_: Distributed value is represented by the base class\n `tf.distribute.DistributedValues`. `tf.distribute.DistributedValues` is useful\n to represent values on multiple devices, and it contains a map from replica id\n to values. Two representative kinds of `tf.distribute.DistributedValues` are\n \"PerReplica\" and \"Mirrored\" values.\n\n \"PerReplica\" values exist on the worker\n devices, with a different value for each replica. 
They are produced by\n iterating through a distributed dataset returned by\n `tf.distribute.Strategy.experimental_distribute_dataset` and\n `tf.distribute.Strategy.distribute_datasets_from_function`. They\n are also the typical result returned by\n `tf.distribute.Strategy.run`.\n\n \"Mirrored\" values are like \"PerReplica\" values, except we know that the values\n on all replicas are the same. We can safely read a \"Mirrored\" value in a\n cross-replica context by using the value on any replica.\n\n* _Unwrapping_ and _merging_: Consider calling a function `fn` on multiple\n replicas, like `strategy.run(fn, args=[w])` with an\n argument `w` that is a `tf.distribute.DistributedValues`. This means `w` will\n have a map taking replica id `0` to `w0`, replica id `1` to `w1`, etc.\n `strategy.run()` unwraps `w` before calling `fn`, so it calls `fn(w0)` on\n device `d0`, `fn(w1)` on device `d1`, etc. It then merges the return\n values from `fn()`, which leads to one common object if the returned values\n are the same object from every replica, or a `DistributedValues` object\n otherwise.\n\n* _Reductions_ and _all-reduce_: A _reduction_ is a method of aggregating\n multiple values into one value, like \"sum\" or \"mean\". If a strategy is doing\n sync training, we will perform a reduction on the gradients to a parameter\n from all replicas before applying the update. _All-reduce_ is an algorithm for\n performing a reduction on values from multiple devices and making the result\n available on all of those devices.\n\n* _Mirrored variables_: These are variables that are created on multiple\n devices, where we keep the variables in sync by applying the same\n updates to every copy. Mirrored variables are created with\n `tf.Variable(...synchronization=tf.VariableSynchronization.ON_WRITE...)`.\n Normally they are only used in synchronous training.\n\n* _SyncOnRead variables_\n\n _SyncOnRead variables_ are created by\n `tf.Variable(...synchronization=tf.VariableSynchronization.ON_READ...)`, and\n they are created on multiple devices. In replica context, each\n component variable on the local replica can perform reads and writes without\n synchronization with each other. When the\n _SyncOnRead variable_ is read in cross-replica context, the values from\n component variables are aggregated and returned.\n\n _SyncOnRead variables_ bring a lot of custom configuration difficulty to the\n underlying logic, so we do not encourage users to instantiate and use\n _SyncOnRead variable_ on their own. We have mainly used _SyncOnRead\n variables_ for use cases such as batch norm and metrics. For performance\n reasons, we often don't need to keep these statistics in sync every step and\n they can be accumulated on each replica independently. The only time we want\n to sync them is reporting or checkpointing, which typically happens in\n cross-replica context. _SyncOnRead variables_ are also often used by advanced\n users who want to control when variable values are aggregated. For example,\n users sometimes want to maintain gradients independently on each replica for a\n couple of steps without aggregation.\n\n* _Distribute-aware layers_\n\n Layers are generally called in a replica context, except when defining a\n Keras functional model. `tf.distribute.in_cross_replica_context` will let you\n determine which case you are in. 
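A minimal sketch of the two contexts (assuming a single-machine setup; with one visible device `MirroredStrategy` runs a single replica):

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()

with strategy.scope():                # cross-replica context
    v = tf.Variable(1.0)

def step(x):                          # replica context when run below
    ctx = tf.distribute.get_replica_context()
    return x + tf.cast(ctx.replica_id_in_sync_group, tf.float32)

per_replica = strategy.run(step, args=(tf.constant(2.0),))
print(strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica, axis=None))
```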
If in a replica context,\n the `tf.distribute.get_replica_context` function will return the default\n replica context outside a strategy scope, `None` within a strategy scope, and\n a `tf.distribute.ReplicaContext` object inside a strategy scope and within a\n `tf.distribute.Strategy.run` function. The `ReplicaContext` object has an\n `all_reduce` method for aggregating across all replicas.\n\n\nNote that we provide a default version of `tf.distribute.Strategy` that is\nused when no other strategy is in scope, that provides the same API with\nreasonable default behavior.\n\n"}, {"name": "dtypes", "path": "./tf/dtypes.md", "desc": " modulePublic API for tf.dtypes namespace.", "type": "Modules", "docs": "Public API for tf.dtypes namespace.\n"}, {"name": "errors", "path": "./tf/errors.md", "desc": " moduleException types for TensorFlow errors.", "type": "Modules", "docs": "Exception types for TensorFlow errors.\n"}, {"name": "estimator", "path": "./tf/estimator.md", "desc": " moduleEstimatorHigh level tools for working with models.", "type": "Modules", "docs": null}, {"name": "experimental", "path": "./tf/experimental.md", "desc": " modulePublic API for tf.experimental namespace.", "type": "Modules", "docs": "Public API for tf.experimental namespace.\n"}, {"name": "feature_column", "path": "./tf/feature_column.md", "desc": " modulePublic API for tf.feature_column namespace.", "type": "Modules", "docs": "Public API for tf.feature_column namespace.\n"}, {"name": "graph_util", "path": "./tf/graph_util.md", "desc": " moduleHelpers to manipulate a tensor graph in python.", "type": "Modules", "docs": "Helpers to manipulate a tensor graph in python.\n\n"}, {"name": "image", "path": "./tf/image.md", "desc": " moduleImage ops.", "type": "Modules", "docs": "Image ops.\n\nThe `tf.image` module contains various functions for image\nprocessing and decoding-encoding Ops.\n\nMany of the encoding/decoding functions are also available in the\ncore `tf.io` module.\n\n## Image processing\n\n### Resizing\n\nThe resizing Ops accept input images as tensors of several types. They always\noutput resized images as float32 tensors.\n\nThe convenience function `tf.image.resize` supports both 4-D\nand 3-D tensors as input and output. 4-D tensors are for batches of images,\n3-D tensors for individual images.\n\nResized images will be distorted if their original aspect ratio is not the\nsame as size. To avoid distortions see tf.image.resize_with_pad.\n\n* `tf.image.resize`\n* `tf.image.resize_with_pad`\n* `tf.image.resize_with_crop_or_pad`\n\nThe Class `tf.image.ResizeMethod` provides various resize methods like\n`bilinear`, `nearest_neighbor`.\n\n### Converting Between Colorspaces\n\nImage ops work either on individual images or on batches of images, depending on\nthe shape of their input Tensor.\n\nIf 3-D, the shape is `[height, width, channels]`, and the Tensor represents one\nimage. If 4-D, the shape is `[batch_size, height, width, channels]`, and the\nTensor represents `batch_size` images.\n\nCurrently, `channels` can usefully be 1, 2, 3, or 4. Single-channel images are\ngrayscale, images with 3 channels are encoded as either RGB or HSV. 
Images\nwith 2 or 4 channels include an alpha channel, which has to be stripped from the\nimage before passing the image to most image processing functions (and can be\nre-attached later).\n\nInternally, images are stored either as one `float32` per channel per pixel\n(implicitly, values are assumed to lie in `[0,1)`) or one `uint8` per channel\nper pixel (values are assumed to lie in `[0,255]`).\n\nTensorFlow can convert between images in RGB or HSV or YIQ.\n\n* `tf.image.rgb_to_grayscale`, `tf.image.grayscale_to_rgb`\n* `tf.image.rgb_to_hsv`, `tf.image.hsv_to_rgb`\n* `tf.image.rgb_to_yiq`, `tf.image.yiq_to_rgb`\n* `tf.image.rgb_to_yuv`, `tf.image.yuv_to_rgb`\n* `tf.image.image_gradients`\n* `tf.image.convert_image_dtype`\n\n### Image Adjustments\n\nTensorFlow provides functions to adjust images in various ways: brightness,\ncontrast, hue, and saturation. Each adjustment can be done with predefined\nparameters or with random parameters picked from predefined intervals. Random\nadjustments are often useful to expand a training set and reduce overfitting.\n\nIf several adjustments are chained it is advisable to minimize the number of\nredundant conversions by first converting the images to the most natural data\ntype and representation.\n\n* `tf.image.adjust_brightness`\n* `tf.image.adjust_contrast`\n* `tf.image.adjust_gamma`\n* `tf.image.adjust_hue`\n* `tf.image.adjust_jpeg_quality`\n* `tf.image.adjust_saturation`\n* `tf.image.random_brightness`\n* `tf.image.random_contrast`\n* `tf.image.random_hue`\n* `tf.image.random_saturation`\n* `tf.image.per_image_standardization`\n\n### Working with Bounding Boxes\n\n* `tf.image.draw_bounding_boxes`\n* `tf.image.combined_non_max_suppression`\n* `tf.image.generate_bounding_box_proposals`\n* `tf.image.non_max_suppression`\n* `tf.image.non_max_suppression_overlaps`\n* `tf.image.non_max_suppression_padded`\n* `tf.image.non_max_suppression_with_scores`\n* `tf.image.pad_to_bounding_box`\n* `tf.image.sample_distorted_bounding_box`\n\n### Cropping\n\n* `tf.image.central_crop`\n* `tf.image.crop_and_resize`\n* `tf.image.crop_to_bounding_box`\n* `tf.io.decode_and_crop_jpeg`\n* `tf.image.extract_glimpse`\n* `tf.image.random_crop`\n* `tf.image.resize_with_crop_or_pad`\n\n### Flipping, Rotating and Transposing\n\n* `tf.image.flip_left_right`\n* `tf.image.flip_up_down`\n* `tf.image.random_flip_left_right`\n* `tf.image.random_flip_up_down`\n* `tf.image.rot90`\n* `tf.image.transpose`\n\n## Image decoding and encoding\n\nTensorFlow provides Ops to decode and encode JPEG and PNG formats. Encoded\nimages are represented by scalar string Tensors, decoded images by 3-D uint8\ntensors of shape `[height, width, channels]`. (PNG also supports uint16.)\n\nNote: `decode_gif` returns a 4-D array `[num_frames, height, width, 3]`\n\nThe encode and decode Ops apply to one image at a time. Their input and output\nare all of variable size. 
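For instance, a self-contained sketch of the encode/decode round trip (a synthetic image is used so no image file is assumed); as noted next, the decoded output can be passed straight to a resizing Op:

```python
import tensorflow as tf

# Synthetic uint8 image so the example needs no file on disk.
img = tf.cast(
    tf.random.uniform([8, 10, 3], maxval=256, dtype=tf.int32), tf.uint8)
encoded = tf.io.encode_jpeg(img)            # scalar string tensor
decoded = tf.io.decode_jpeg(encoded)        # uint8, shape [8, 10, 3]
resized = tf.image.resize(decoded, [4, 5])  # float32, shape [4, 5, 3]
print(encoded.dtype, decoded.shape, resized.shape)
```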
If you need fixed size images, pass the output of\nthe decode Ops to one of the cropping and resizing Ops.\n\n* `tf.io.decode_bmp`\n* `tf.io.decode_gif`\n* `tf.io.decode_image`\n* `tf.io.decode_jpeg`\n* `tf.io.decode_and_crop_jpeg`\n* `tf.io.decode_png`\n* `tf.io.encode_jpeg`\n* `tf.io.encode_png`\n\n\n"}, {"name": "io", "path": "./tf/io.md", "desc": " modulePublic API for tf.io namespace.", "type": "Modules", "docs": "Public API for tf.io namespace.\n"}, {"name": "keras", "path": "./tf/keras.md", "desc": " moduleImplementation of the Keras API, the high-level API of TensorFlow.", "type": "Modules", "docs": "Implementation of the Keras API, the high-level API of TensorFlow.\n\nDetailed documentation and user guides are available at\n[keras.io](https://keras.io).\n\n"}, {"name": "linalg", "path": "./tf/linalg.md", "desc": " moduleOperations for linear algebra.", "type": "Modules", "docs": "Operations for linear algebra.\n"}, {"name": "lite", "path": "./tf/lite.md", "desc": " modulePublic API for tf.lite namespace.", "type": "Modules", "docs": "Public API for tf.lite namespace.\n"}, {"name": "lookup", "path": "./tf/lookup.md", "desc": " modulePublic API for tf.lookup namespace.", "type": "Modules", "docs": "Public API for tf.lookup namespace.\n"}, {"name": "math", "path": "./tf/math.md", "desc": " moduleMath Operations.", "type": "Modules", "docs": "Math Operations.\n\nNote: Functions taking `Tensor` arguments can also take anything accepted by\n`tf.convert_to_tensor`.\n\nNote: Elementwise binary operations in TensorFlow follow [numpy-style\nbroadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).\n\nTensorFlow provides a variety of math functions including:\n\n* Basic arithmetic operators and trigonometric functions.\n* Special math functions (like: `tf.math.igamma` and `tf.math.zeta`)\n* Complex number functions (like: `tf.math.imag` and `tf.math.angle`)\n* Reductions and scans (like: `tf.math.reduce_mean` and `tf.math.cumsum`)\n* Segment functions (like: `tf.math.segment_sum`)\n\nSee: `tf.linalg` for matrix and tensor functions.\n\n\n\n## About Segmentation\n\nTensorFlow provides several operations that you can use to perform common\nmath computations on tensor segments.\nHere a segmentation is a partitioning of a tensor along\nthe first dimension, i.e. it defines a mapping from the first dimension onto\n`segment_ids`. 
The `segment_ids` tensor should be the size of\nthe first dimension, `d0`, with consecutive IDs in the range `0` to `k`,\nwhere `k<d0`. In particular, a segmentation of a matrix tensor is a mapping of\nrows to segments.\n\nFor example:\n\n```python\nc = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])\ntf.math.segment_sum(c, tf.constant([0, 0, 1]))\n# ==> [[0 0 0 0]\n# [5 6 7 8]]\n```\n\nThe standard `segment_*` functions assert that the segment indices are sorted.\nIf you have unsorted indices use the equivalent `unsorted_segment_` function.\nThese functions take an additional argument `num_segments` so that the output\ntensor can be efficiently allocated.\n\n``` python\nc = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])\ntf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)\n# ==> [[ 6, 8, 10, 12],\n# [-1, -2, -3, -4]]\n```\n\n\n"}, {"name": "mlir", "path": "./tf/mlir.md", "desc": " modulePublic API for tf.mlir namespace.", "type": "Modules", "docs": "Public API for tf.mlir namespace.\n"}, {"name": "nest", "path": "./tf/nest.md", "desc": " moduleFunctions that work with structures.", "type": "Modules", "docs": "Functions that work with structures.\n\nA structure is either:\n\n* one of the recognized Python collections, holding _nested structures_;\n* a value of any other type, typically a TensorFlow data type like Tensor,\n Variable, or of compatible types such as int, float, ndarray, etc. These are\n commonly referred to as _atoms_ of the structure.\n\nA structure of type `T` is a structure whose atomic items are of type `T`.\nFor example, a structure of `tf.Tensor` only contains `tf.Tensor` as its atoms.\n\nHistorically a _nested structure_ was called a _nested sequence_ in TensorFlow.\nA nested structure is sometimes called a _nest_ or a _tree_, but the formal\nname _nested structure_ is preferred.\n\nRefer to [Nesting Data Structures]\n(https://en.wikipedia.org/wiki/Nesting_(computing)#Data_structures).\n\nThe following collection types are recognized by `tf.nest` as nested\nstructures:\n\n* `collections.abc.Sequence` (except `string` and `bytes`).\n This includes `list`, `tuple`, and `namedtuple`.\n* `collections.abc.Mapping` (with sortable keys).\n This includes `dict` and `collections.OrderedDict`.\n* `collections.abc.MappingView` (with sortable keys).\n* [`attr.s` classes](https://www.attrs.org/).\n\nAny other values are considered **atoms**. Not all collection types are\nconsidered nested structures. For example, the following types are\nconsidered atoms:\n\n* `set`; `{\"a\", \"b\"}` is an atom, while `[\"a\", \"b\"]` is a nested structure.\n* [`dataclass` classes](https://docs.python.org/library/dataclasses.html)\n* `tf.Tensor`\n* `numpy.array`\n\n`tf.nest.is_nested` checks whether an object is a nested structure or an atom.\nFor example:\n\n >>> tf.nest.is_nested(\"1234\")\n False\n >>> tf.nest.is_nested([1, 3, [4, 5]])\n True\n >>> tf.nest.is_nested(((7, 8), (5, 6)))\n True\n >>> tf.nest.is_nested([])\n True\n >>> tf.nest.is_nested({\"a\": 1, \"b\": 2})\n True\n >>> tf.nest.is_nested({\"a\": 1, \"b\": 2}.keys())\n True\n >>> tf.nest.is_nested({\"a\": 1, \"b\": 2}.values())\n True\n >>> tf.nest.is_nested({\"a\": 1, \"b\": 2}.items())\n True\n >>> tf.nest.is_nested(set([1, 2]))\n False\n >>> ones = tf.ones([2, 3])\n >>> tf.nest.is_nested(ones)\n False\n\nNote: A proper structure shall form a tree. The user shall ensure there are no\ncyclic references within the items in the structure,\ni.e., no references in the structure of the input of these functions\nshould be recursive. 
The behavior is undefined if there is a cycle.\n\n\n"}, {"name": "nn", "path": "./tf/nn.md", "desc": " modulePrimitive Neural Net (NN", "type": "Modules", "docs": "Primitive Neural Net (NN) Operations.\n\n## Notes on padding\n\nSeveral neural network operations, such as `tf.nn.conv2d` and\n`tf.nn.max_pool2d`, take a `padding` parameter, which controls how the input is\npadded before running the operation. The input is padded by inserting values\n(typically zeros) before and after the tensor in each spatial dimension. The\n`padding` parameter can either be the string `'VALID'`, which means use no\npadding, or `'SAME'` which adds padding according to a formula which is\ndescribed below. Certain ops also allow the amount of padding per dimension to\nbe explicitly specified by passing a list to `padding`.\n\nIn the case of convolutions, the input is padded with zeros. In case of pools,\nthe padded input values are ignored. For example, in a max pool, the sliding\nwindow ignores padded values, which is equivalent to the padded values being\n`-infinity`.\n\n### `'VALID'` padding\n\nPassing `padding='VALID'` to an op causes no padding to be used. This causes the\noutput size to typically be smaller than the input size, even when the stride is\none. In the 2D case, the output size is computed as:\n\n```python\nout_height = ceil((in_height - filter_height + 1) / stride_height)\nout_width = ceil((in_width - filter_width + 1) / stride_width)\n```\n\nThe 1D and 3D cases are similar. Note `filter_height` and `filter_width` refer\nto the filter size after dilations (if any) for convolutions, and refer to the\nwindow size for pools.\n\n### `'SAME'` padding\n\nWith `'SAME'` padding, padding is applied to each spatial dimension. When the\nstrides are 1, the input is padded such that the output size is the same as the\ninput size. In the 2D case, the output size is computed as:\n\n```python\nout_height = ceil(in_height / stride_height)\nout_width = ceil(in_width / stride_width)\n```\n\nThe amount of padding used is the smallest amount that results in the output\nsize. The formula for the total amount of padding per dimension is:\n\n```python\nif (in_height % strides[1] == 0):\n pad_along_height = max(filter_height - stride_height, 0)\nelse:\n pad_along_height = max(filter_height - (in_height % stride_height), 0)\nif (in_width % strides[2] == 0):\n pad_along_width = max(filter_width - stride_width, 0)\nelse:\n pad_along_width = max(filter_width - (in_width % stride_width), 0)\n```\n\nFinally, the padding on the top, bottom, left and right are:\n\n```python\npad_top = pad_along_height // 2\npad_bottom = pad_along_height - pad_top\npad_left = pad_along_width // 2\npad_right = pad_along_width - pad_left\n```\n\nNote that the division by 2 means that there might be cases when the padding on\nboth sides (top vs bottom, right vs left) are off by one. In this case, the\nbottom and right sides always get the one additional padded pixel. For example,\nwhen pad_along_height is 5, we pad 2 pixels at the top and 3 pixels at the\nbottom. 
Note that this is different from existing libraries such as PyTorch and\nCaffe, which explicitly specify the number of padded pixels and always pad the\nsame number of pixels on both sides.\n\nHere is an example of `'SAME'` padding:\n\n>>> in_height = 5\n>>> filter_height = 3\n>>> stride_height = 2\n>>>\n>>> in_width = 2\n>>> filter_width = 2\n>>> stride_width = 1\n>>>\n>>> inp = tf.ones((2, in_height, in_width, 2))\n>>> filter = tf.ones((filter_height, filter_width, 2, 2))\n>>> strides = [stride_height, stride_width]\n>>> output = tf.nn.conv2d(inp, filter, strides, padding='SAME')\n>>> output.shape[1] # output_height: ceil(5 / 2)\n3\n>>> output.shape[2] # output_width: ceil(2 / 1)\n2\n\n### Explicit padding\n\nCertain ops, like `tf.nn.conv2d`, also allow a list of explicit padding amounts\nto be passed to the `padding` parameter. This list is in the same format as what\nis passed to `tf.pad`, except the padding must be a nested list, not a tensor.\nFor example, in the 2D case, the list is in the format `[[0, 0], [pad_top,\npad_bottom], [pad_left, pad_right], [0, 0]]` when `data_format` is its default\nvalue of `'NHWC'`. The two `[0, 0]` pairs indicate the batch and channel\ndimensions have no padding, which is required, as only spatial dimensions can\nhave padding.\n\nFor example:\n\n>>> inp = tf.ones((1, 3, 3, 1))\n>>> filter = tf.ones((2, 2, 1, 1))\n>>> strides = [1, 1]\n>>> padding = [[0, 0], [1, 2], [0, 1], [0, 0]]\n>>> output = tf.nn.conv2d(inp, filter, strides, padding=padding)\n>>> tuple(output.shape)\n(1, 5, 3, 1)\n>>> # Equivalently, tf.pad can be used, since convolutions pad with zeros.\n>>> inp = tf.pad(inp, padding)\n>>> # 'VALID' means to use no padding in conv2d (we already padded inp)\n>>> output2 = tf.nn.conv2d(inp, filter, strides, padding='VALID')\n>>> tf.debugging.assert_equal(output, output2)\n\n"}, {"name": "profiler", "path": "./tf/profiler.md", "desc": " modulePublic API for tf.profiler namespace.", "type": "Modules", "docs": "Public API for tf.profiler namespace.\n"}, {"name": "quantization", "path": "./tf/quantization.md", "desc": " modulePublic API for tf.quantization namespace.", "type": "Modules", "docs": "Public API for tf.quantization namespace.\n"}, {"name": "queue", "path": "./tf/queue.md", "desc": " modulePublic API for tf.queue namespace.", "type": "Modules", "docs": "Public API for tf.queue namespace.\n"}, {"name": "ragged", "path": "./tf/ragged.md", "desc": " moduleRagged Tensors.", "type": "Modules", "docs": "Ragged Tensors.\n\nThis package defines ops for manipulating ragged tensors (`tf.RaggedTensor`),\nwhich are tensors with non-uniform shapes. In particular, each `RaggedTensor`\nhas one or more *ragged dimensions*, which are dimensions whose slices may have\ndifferent lengths. For example, the inner (column) dimension of\n`rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged, since the column slices\n(`rt[0, :]`, ..., `rt[4, :]`) have different lengths. 
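A short sketch of that ragged tensor and one of the elementwise ops listed below:

```python
import tensorflow as tf

rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
print(rt.row_lengths())    # [4 0 3 1 0]
print(tf.math.add(rt, 1))  # [[4, 2, 5, 2], [], [6, 10, 3], [7], []]
```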
For a more detailed\ndescription of ragged tensors, see the `tf.RaggedTensor` class documentation\nand the [Ragged Tensor Guide](/guide/ragged_tensor).\n\n\n### Additional ops that support `RaggedTensor`\n\nArguments that accept `RaggedTensor`s are marked in **bold**.\n\n* `tf.__operators__.eq`(**self**, **other**)\n* `tf.__operators__.ne`(**self**, **other**)\n* `tf.bitcast`(**input**, type, name=`None`)\n* `tf.bitwise.bitwise_and`(**x**, **y**, name=`None`)\n* `tf.bitwise.bitwise_or`(**x**, **y**, name=`None`)\n* `tf.bitwise.bitwise_xor`(**x**, **y**, name=`None`)\n* `tf.bitwise.invert`(**x**, name=`None`)\n* `tf.bitwise.left_shift`(**x**, **y**, name=`None`)\n* `tf.bitwise.right_shift`(**x**, **y**, name=`None`)\n* `tf.broadcast_to`(**input**, **shape**, name=`None`)\n* `tf.cast`(**x**, dtype, name=`None`)\n* `tf.clip_by_value`(**t**, clip_value_min, clip_value_max, name=`None`)\n* `tf.concat`(**values**, axis, name=`'concat'`)\n* `tf.debugging.check_numerics`(**tensor**, message, name=`None`)\n* `tf.dtypes.complex`(**real**, **imag**, name=`None`)\n* `tf.dtypes.saturate_cast`(**value**, dtype, name=`None`)\n* `tf.dynamic_partition`(**data**, **partitions**, num_partitions, name=`None`)\n* `tf.expand_dims`(**input**, axis, name=`None`)\n* `tf.gather_nd`(**params**, **indices**, batch_dims=`0`, name=`None`)\n* `tf.gather`(**params**, **indices**, validate_indices=`None`, axis=`None`, batch_dims=`0`, name=`None`)\n* `tf.image.adjust_brightness`(**image**, delta)\n* `tf.image.adjust_gamma`(**image**, gamma=`1`, gain=`1`)\n* `tf.image.convert_image_dtype`(**image**, dtype, saturate=`False`, name=`None`)\n* `tf.image.random_brightness`(**image**, max_delta, seed=`None`)\n* `tf.image.resize`(**images**, size, method=`'bilinear'`, preserve_aspect_ratio=`False`, antialias=`False`, name=`None`)\n* `tf.image.stateless_random_brightness`(**image**, max_delta, seed)\n* `tf.io.decode_base64`(**input**, name=`None`)\n* `tf.io.decode_compressed`(**bytes**, compression_type=`''`, name=`None`)\n* `tf.io.encode_base64`(**input**, pad=`False`, name=`None`)\n* `tf.linalg.matmul`(**a**, **b**, transpose_a=`False`, transpose_b=`False`, adjoint_a=`False`, adjoint_b=`False`, a_is_sparse=`False`, b_is_sparse=`False`, output_type=`None`, name=`None`)\n* `tf.math.abs`(**x**, name=`None`)\n* `tf.math.acos`(**x**, name=`None`)\n* `tf.math.acosh`(**x**, name=`None`)\n* `tf.math.add_n`(**inputs**, name=`None`)\n* `tf.math.add`(**x**, **y**, name=`None`)\n* `tf.math.angle`(**input**, name=`None`)\n* `tf.math.asin`(**x**, name=`None`)\n* `tf.math.asinh`(**x**, name=`None`)\n* `tf.math.atan2`(**y**, **x**, name=`None`)\n* `tf.math.atan`(**x**, name=`None`)\n* `tf.math.atanh`(**x**, name=`None`)\n* `tf.math.bessel_i0`(**x**, name=`None`)\n* `tf.math.bessel_i0e`(**x**, name=`None`)\n* `tf.math.bessel_i1`(**x**, name=`None`)\n* `tf.math.bessel_i1e`(**x**, name=`None`)\n* `tf.math.ceil`(**x**, name=`None`)\n* `tf.math.conj`(**x**, name=`None`)\n* `tf.math.cos`(**x**, name=`None`)\n* `tf.math.cosh`(**x**, name=`None`)\n* `tf.math.digamma`(**x**, name=`None`)\n* `tf.math.divide_no_nan`(**x**, **y**, name=`None`)\n* `tf.math.divide`(**x**, **y**, name=`None`)\n* `tf.math.equal`(**x**, **y**, name=`None`)\n* `tf.math.erf`(**x**, name=`None`)\n* `tf.math.erfc`(**x**, name=`None`)\n* `tf.math.erfcinv`(**x**, name=`None`)\n* `tf.math.erfinv`(**x**, name=`None`)\n* `tf.math.exp`(**x**, name=`None`)\n* `tf.math.expm1`(**x**, name=`None`)\n* `tf.math.floor`(**x**, name=`None`)\n* `tf.math.floordiv`(**x**, **y**, name=`None`)\n* 
`tf.math.floormod`(**x**, **y**, name=`None`)\n* `tf.math.greater_equal`(**x**, **y**, name=`None`)\n* `tf.math.greater`(**x**, **y**, name=`None`)\n* `tf.math.imag`(**input**, name=`None`)\n* `tf.math.is_finite`(**x**, name=`None`)\n* `tf.math.is_inf`(**x**, name=`None`)\n* `tf.math.is_nan`(**x**, name=`None`)\n* `tf.math.less_equal`(**x**, **y**, name=`None`)\n* `tf.math.less`(**x**, **y**, name=`None`)\n* `tf.math.lgamma`(**x**, name=`None`)\n* `tf.math.log1p`(**x**, name=`None`)\n* `tf.math.log_sigmoid`(**x**, name=`None`)\n* `tf.math.log`(**x**, name=`None`)\n* `tf.math.logical_and`(**x**, **y**, name=`None`)\n* `tf.math.logical_not`(**x**, name=`None`)\n* `tf.math.logical_or`(**x**, **y**, name=`None`)\n* `tf.math.logical_xor`(**x**, **y**, name=`'LogicalXor'`)\n* `tf.math.maximum`(**x**, **y**, name=`None`)\n* `tf.math.minimum`(**x**, **y**, name=`None`)\n* `tf.math.multiply_no_nan`(**x**, **y**, name=`None`)\n* `tf.math.multiply`(**x**, **y**, name=`None`)\n* `tf.math.ndtri`(**x**, name=`None`)\n* `tf.math.negative`(**x**, name=`None`)\n* `tf.math.nextafter`(**x1**, x2, name=`None`)\n* `tf.math.not_equal`(**x**, **y**, name=`None`)\n* `tf.math.pow`(**x**, **y**, name=`None`)\n* `tf.math.real`(**input**, name=`None`)\n* `tf.math.reciprocal_no_nan`(**x**, name=`None`)\n* `tf.math.reciprocal`(**x**, name=`None`)\n* `tf.math.reduce_all`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_any`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_max`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_mean`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_min`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_prod`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_std`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_sum`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_variance`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.rint`(**x**, name=`None`)\n* `tf.math.round`(**x**, name=`None`)\n* `tf.math.rsqrt`(**x**, name=`None`)\n* `tf.math.scalar_mul`(**scalar**, **x**, name=`None`)\n* `tf.math.sigmoid`(**x**, name=`None`)\n* `tf.math.sign`(**x**, name=`None`)\n* `tf.math.sin`(**x**, name=`None`)\n* `tf.math.sinh`(**x**, name=`None`)\n* `tf.math.softplus`(**features**, name=`None`)\n* `tf.math.special.bessel_j0`(**x**, name=`None`)\n* `tf.math.special.bessel_j1`(**x**, name=`None`)\n* `tf.math.special.bessel_k0`(**x**, name=`None`)\n* `tf.math.special.bessel_k0e`(**x**, name=`None`)\n* `tf.math.special.bessel_k1`(**x**, name=`None`)\n* `tf.math.special.bessel_k1e`(**x**, name=`None`)\n* `tf.math.special.bessel_y0`(**x**, name=`None`)\n* `tf.math.special.bessel_y1`(**x**, name=`None`)\n* `tf.math.special.dawsn`(**x**, name=`None`)\n* `tf.math.special.expint`(**x**, name=`None`)\n* `tf.math.special.fresnel_cos`(**x**, name=`None`)\n* `tf.math.special.fresnel_sin`(**x**, name=`None`)\n* `tf.math.special.spence`(**x**, name=`None`)\n* `tf.math.sqrt`(**x**, name=`None`)\n* `tf.math.square`(**x**, name=`None`)\n* `tf.math.squared_difference`(**x**, **y**, name=`None`)\n* `tf.math.subtract`(**x**, **y**, name=`None`)\n* `tf.math.tan`(**x**, name=`None`)\n* `tf.math.tanh`(**x**, name=`None`)\n* `tf.math.truediv`(**x**, **y**, name=`None`)\n* `tf.math.unsorted_segment_max`(**data**, **segment_ids**, num_segments, name=`None`)\n* 
`tf.math.unsorted_segment_mean`(**data**, **segment_ids**, num_segments, name=`None`)\n* `tf.math.unsorted_segment_min`(**data**, **segment_ids**, num_segments, name=`None`)\n* `tf.math.unsorted_segment_prod`(**data**, **segment_ids**, num_segments, name=`None`)\n* `tf.math.unsorted_segment_sqrt_n`(**data**, **segment_ids**, num_segments, name=`None`)\n* `tf.math.unsorted_segment_sum`(**data**, **segment_ids**, num_segments, name=`None`)\n* `tf.math.xdivy`(**x**, **y**, name=`None`)\n* `tf.math.xlog1py`(**x**, **y**, name=`None`)\n* `tf.math.xlogy`(**x**, **y**, name=`None`)\n* `tf.math.zeta`(**x**, **q**, name=`None`)\n* `tf.nn.dropout`(**x**, rate, noise_shape=`None`, seed=`None`, name=`None`)\n* `tf.nn.elu`(**features**, name=`None`)\n* `tf.nn.gelu`(**features**, approximate=`False`, name=`None`)\n* `tf.nn.leaky_relu`(**features**, alpha=`0.2`, name=`None`)\n* `tf.nn.relu6`(**features**, name=`None`)\n* `tf.nn.relu`(**features**, name=`None`)\n* `tf.nn.selu`(**features**, name=`None`)\n* `tf.nn.sigmoid_cross_entropy_with_logits`(**labels**=`None`, **logits**=`None`, name=`None`)\n* `tf.nn.silu`(**features**, beta=`1.0`)\n* `tf.nn.softmax`(**logits**, axis=`None`, name=`None`)\n* `tf.nn.softsign`(**features**, name=`None`)\n* `tf.one_hot`(**indices**, depth, on_value=`None`, off_value=`None`, axis=`None`, dtype=`None`, name=`None`)\n* `tf.ones_like`(**input**, dtype=`None`, name=`None`)\n* `tf.print`(***inputs**, **kwargs)\n* `tf.rank`(**input**, name=`None`)\n* `tf.realdiv`(**x**, **y**, name=`None`)\n* `tf.reshape`(**tensor**, **shape**, name=`None`)\n* `tf.reverse`(**tensor**, axis, name=`None`)\n* `tf.size`(**input**, out_type=`tf.int32`, name=`None`)\n* `tf.split`(**value**, num_or_size_splits, axis=`0`, num=`None`, name=`'split'`)\n* `tf.squeeze`(**input**, axis=`None`, name=`None`)\n* `tf.stack`(**values**, axis=`0`, name=`'stack'`)\n* `tf.strings.as_string`(**input**, precision=`-1`, scientific=`False`, shortest=`False`, width=`-1`, fill=`''`, name=`None`)\n* `tf.strings.format`(**template**, **inputs**, placeholder=`'{}'`, summarize=`3`, name=`None`)\n* `tf.strings.join`(**inputs**, separator=`''`, name=`None`)\n* `tf.strings.length`(**input**, unit=`'BYTE'`, name=`None`)\n* `tf.strings.lower`(**input**, encoding=`''`, name=`None`)\n* `tf.strings.reduce_join`(**inputs**, axis=`None`, keepdims=`False`, separator=`''`, name=`None`)\n* `tf.strings.regex_full_match`(**input**, pattern, name=`None`)\n* `tf.strings.regex_replace`(**input**, pattern, rewrite, replace_global=`True`, name=`None`)\n* `tf.strings.strip`(**input**, name=`None`)\n* `tf.strings.substr`(**input**, pos, len, unit=`'BYTE'`, name=`None`)\n* `tf.strings.to_hash_bucket_fast`(**input**, num_buckets, name=`None`)\n* `tf.strings.to_hash_bucket_strong`(**input**, num_buckets, key, name=`None`)\n* `tf.strings.to_hash_bucket`(**input**, num_buckets, name=`None`)\n* `tf.strings.to_number`(**input**, out_type=`tf.float32`, name=`None`)\n* `tf.strings.unicode_script`(**input**, name=`None`)\n* `tf.strings.unicode_transcode`(**input**, input_encoding, output_encoding, errors=`'replace'`, replacement_char=`65533`, replace_control_characters=`False`, name=`None`)\n* `tf.strings.upper`(**input**, encoding=`''`, name=`None`)\n* `tf.tile`(**input**, multiples, name=`None`)\n* `tf.truncatediv`(**x**, **y**, name=`None`)\n* `tf.truncatemod`(**x**, **y**, name=`None`)\n* `tf.where`(**condition**, **x**=`None`, **y**=`None`, name=`None`)\n* `tf.zeros_like`(**input**, dtype=`None`, name=`None`)\n"}, {"name": "random", "path": 
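The list above is raw signature metadata and never shows the ops in use. As a small, hedged illustration (not part of the scraped metadata; the input values are invented), a few of the listed math and string ops applied eagerly:

```python
import tensorflow as tf

# A handful of the ops listed above. Inputs are made up for illustration.
x = tf.constant([1.0, 4.0, 9.0])
print(tf.math.sqrt(x))                      # [1. 2. 3.]
print(tf.math.reduce_sum(x))                # 14.0
print(tf.math.squared_difference(x, 1.0))   # [0. 9. 64.]

s = tf.constant(["Hello", "WORLD"])
print(tf.strings.lower(s))                  # [b'hello' b'world']
print(tf.strings.join([s, s], separator="-"))  # [b'Hello-Hello' b'WORLD-WORLD']
```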
"./tf/random.md", "desc": " modulePublic API for tf.random namespace.", "type": "Modules", "docs": "Public API for tf.random namespace.\n"}, {"name": "raw_ops", "path": "./tf/raw_ops.md", "desc": " modulePublic API for tf.raw_ops namespace.", "type": "Modules", "docs": "Public API for tf.raw_ops namespace.\n"}, {"name": "saved_model", "path": "./tf/saved_model.md", "desc": " modulePublic API for tf.saved_model namespace.", "type": "Modules", "docs": "Public API for tf.saved_model namespace.\n"}, {"name": "sets", "path": "./tf/sets.md", "desc": " moduleTensorflow set operations.", "type": "Modules", "docs": "Tensorflow set operations.\n"}, {"name": "signal", "path": "./tf/signal.md", "desc": " moduleSignal processing operations.", "type": "Modules", "docs": "Signal processing operations.\n\nSee the [tf.signal](https://tensorflow.org/api_guides/python/contrib.signal)\nguide.\n\n@@frame\n@@hamming_window\n@@hann_window\n@@inverse_stft\n@@inverse_stft_window_fn\n@@mfccs_from_log_mel_spectrograms\n@@linear_to_mel_weight_matrix\n@@overlap_and_add\n@@stft\n\n[hamming]: https://en.wikipedia.org/wiki/Window_function#Hamming_window\n[hann]: https://en.wikipedia.org/wiki/Window_function#Hann_window\n[mel]: https://en.wikipedia.org/wiki/Mel_scale\n[mfcc]: https://en.wikipedia.org/wiki/Mel-frequency_cepstrum\n[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform\n\n"}, {"name": "sparse", "path": "./tf/sparse.md", "desc": " moduleSparse Tensor Representation.", "type": "Modules", "docs": "Sparse Tensor Representation.\n\nSee also `tf.sparse.SparseTensor`.\n\n"}, {"name": "strings", "path": "./tf/strings.md", "desc": " moduleOperations for working with string Tensors.", "type": "Modules", "docs": "Operations for working with string Tensors.\n"}, {"name": "summary", "path": "./tf/summary.md", "desc": " moduleOperations for writing summary data, for use in analysis and visualization.", "type": "Modules", "docs": "Operations for writing summary data, for use in analysis and visualization.\n\nThe `tf.summary` module provides APIs for writing summary data. 
This data can be\nvisualized in TensorBoard, the visualization toolkit that comes with TensorFlow.\nSee the [TensorBoard website](https://www.tensorflow.org/tensorboard) for more\ndetailed tutorials about how to use these APIs, or some quick examples below.\n\nExample usage with eager execution, the default in TF 2.0:\n\n```python\nwriter = tf.summary.create_file_writer(\"/tmp/mylogs\")\nwith writer.as_default():\n for step in range(100):\n # other model code would go here\n tf.summary.scalar(\"my_metric\", 0.5, step=step)\n writer.flush()\n```\n\nExample usage with `tf.function` graph execution:\n\n```python\nwriter = tf.summary.create_file_writer(\"/tmp/mylogs\")\n\n@tf.function\ndef my_func(step):\n # other model code would go here\n with writer.as_default():\n tf.summary.scalar(\"my_metric\", 0.5, step=step)\n\nfor step in range(100):\n my_func(step)\n writer.flush()\n```\n\nExample usage with legacy TF 1.x graph execution:\n\n```python\nwith tf.compat.v1.Graph().as_default():\n step = tf.Variable(0, dtype=tf.int64)\n step_update = step.assign_add(1)\n writer = tf.summary.create_file_writer(\"/tmp/mylogs\")\n with writer.as_default():\n tf.summary.scalar(\"my_metric\", 0.5, step=step)\n all_summary_ops = tf.compat.v1.summary.all_v2_summary_ops()\n writer_flush = writer.flush()\n\n sess = tf.compat.v1.Session()\n sess.run([writer.init(), step.initializer])\n for i in range(100):\n sess.run(all_summary_ops)\n sess.run(step_update)\n sess.run(writer_flush)\n```\n"}, {"name": "sysconfig", "path": "./tf/sysconfig.md", "desc": "module: System configuration library.", "type": "Modules", "docs": "System configuration library.\n"}, {"name": "test", "path": "./tf/test.md", "desc": "module: Testing.", "type": "Modules", "docs": "Testing.\n"}, {"name": "tpu", "path": "./tf/tpu.md", "desc": "module: Ops related to Tensor Processing Units.", "type": "Modules", "docs": "Ops related to Tensor Processing Units.\n"}, {"name": "train", "path": "./tf/train.md", "desc": "module: Support for training models.", "type": "Modules", "docs": "Support for training models.\n\nSee the [Training](https://tensorflow.org/api_guides/python/train) guide.\n\n"}, {"name": "types", "path": "./tf/types.md", "desc": "module: Public TensorFlow type definitions.", "type": "Modules", "docs": "Public TensorFlow type definitions.\n\nFor details, see\nhttps://github.com/tensorflow/community/blob/master/rfcs/20200211-tf-types.md.\n\n"}, {"name": "version", "path": "./tf/version.md", "desc": "module: Public API for tf.version namespace.", "type": "Modules", "docs": "Public API for tf.version namespace.\n"}, {"name": "xla", "path": "./tf/xla.md", "desc": "module: Public API for tf.xla namespace.", "type": "Modules", "docs": "Public API for tf.xla namespace.\n"}, {"name": "AggregationMethod", "path": "./tf/AggregationMethod.md", "desc": "A class listing aggregation methods used to combine gradients.", "type": "Classes", "docs": "A class listing aggregation methods used to combine gradients.\n\n Computing partial derivatives can require aggregating gradient\n contributions. This class lists the various methods that can\n be used to combine gradients in the graph.\n\n The following aggregation methods are part of the stable API for\n aggregating gradients:\n\n * `ADD_N`: All of the gradient terms are summed as part of one\n operation using the \"AddN\" op (see `tf.add_n`). 
This\n method has the property that all gradients must be ready and\n buffered separately in memory before any aggregation is performed.\n * `DEFAULT`: The system-chosen default aggregation method.\n\n The following aggregation methods are experimental and may not\n be supported in future releases:\n\n * `EXPERIMENTAL_TREE`: Gradient terms are summed in pairs using\n the \"AddN\" op. This method of summing gradients may reduce\n performance, but it can improve memory utilization because the\n gradients can be released earlier.\n\n "}, {"name": "CriticalSection", "path": "./tf/CriticalSection.md", "desc": "Critical section.", "type": "Classes", "docs": "Critical section.\n\n A `CriticalSection` object is a resource in the graph which executes subgraphs\n in **serial** order. A common example of a subgraph one may wish to run\n exclusively is the one given by the following function:\n\n ```python\n v = resource_variable_ops.ResourceVariable(0.0, name=\"v\")\n\n def count():\n value = v.read_value()\n with tf.control_dependencies([value]):\n with tf.control_dependencies([v.assign_add(1)]):\n return tf.identity(value)\n ```\n\n Here, a snapshot of `v` is captured in `value`; and then `v` is updated.\n The snapshot value is returned.\n\n If multiple workers or threads all execute `count` in parallel, there is no\n guarantee that access to the variable `v` is atomic at any point within\n any thread's calculation of `count`. In fact, even implementing an atomic\n counter that guarantees that the user will see each value `0, 1, ...,` is\n currently impossible.\n\n The solution is to ensure any access to the underlying resource `v` is\n only processed through a critical section:\n\n ```python\n cs = CriticalSection()\n f1 = cs.execute(count)\n f2 = cs.execute(count)\n output = f1 + f2\n session.run(output)\n ```\n The functions `f1` and `f2` will be executed serially, and updates to `v`\n will be atomic.\n\n **NOTES**\n\n All resource objects, including the critical section and any captured\n variables of functions executed on that critical section, will be\n colocated to the same device (host and cpu/gpu).\n\n When using multiple critical sections on the same resources, there is no\n guarantee of exclusive access to those resources. 
This behavior is disallowed\n by default (but see the kwarg `exclusive_resource_access`).\n\n For example, running the same function in two separate critical sections\n will not ensure serial execution:\n\n ```python\n v = tf.compat.v1.get_variable(\"v\", initializer=0.0, use_resource=True)\n def accumulate(up):\n x = v.read_value()\n with tf.control_dependencies([x]):\n with tf.control_dependencies([v.assign_add(up)]):\n return tf.identity(x)\n ex1 = CriticalSection().execute(\n accumulate, 1.0, exclusive_resource_access=False)\n ex2 = CriticalSection().execute(\n accumulate, 1.0, exclusive_resource_access=False)\n bad_sum = ex1 + ex2\n sess.run(v.initializer)\n sess.run(bad_sum) # May return 0.0\n ```\n "}, {"name": "DType", "path": "./tf/dtypes/DType.md", "desc": "Represents the type of the elements in a `Tensor`.", "type": "Classes", "docs": "Represents the type of the elements in a `Tensor`.\n\n `DType`'s are used to specify the output data type for operations which\n require it, or to inspect the data type of existing `Tensor`'s.\n\n Examples:\n\n >>> tf.constant(1, dtype=tf.int64)\n <tf.Tensor: shape=(), dtype=int64, numpy=1>\n >>> tf.constant(1.0).dtype\n tf.float32\n\n See `tf.dtypes` for a complete list of `DType`'s defined.\n "}, {"name": "DeviceSpec", "path": "./tf/DeviceSpec.md", "desc": "Represents a (possibly partial) specification for a TensorFlow device.", "type": "Classes", "docs": "Represents a (possibly partial) specification for a TensorFlow device.\n\n `DeviceSpec`s are used throughout TensorFlow to describe where state is stored\n and computations occur. Using `DeviceSpec` allows you to parse device spec\n strings to verify their validity, merge them or compose them programmatically.\n\n Example:\n\n ```python\n # Place the operations on device \"GPU:0\" in the \"ps\" job.\n device_spec = DeviceSpec(job=\"ps\", device_type=\"GPU\", device_index=0)\n with tf.device(device_spec.to_string()):\n # Both my_var and squared_var will be placed on /job:ps/device:GPU:0.\n my_var = tf.Variable(..., name=\"my_variable\")\n squared_var = tf.square(my_var)\n ```\n\n With eager execution disabled (by default in TensorFlow 1.x and by calling\n disable_eager_execution() in TensorFlow 2.x), the following syntax\n can be used:\n\n ```python\n tf.compat.v1.disable_eager_execution()\n\n # Same as previous\n device_spec = DeviceSpec(job=\"ps\", device_type=\"GPU\", device_index=0)\n # No need of .to_string() method.\n with tf.device(device_spec):\n my_var = tf.Variable(..., name=\"my_variable\")\n squared_var = tf.square(my_var)\n ```\n\n If a `DeviceSpec` is partially specified, it will be merged with other\n `DeviceSpec`s according to the scope in which it is defined. `DeviceSpec`\n components defined in inner scopes take precedence over those defined in\n outer scopes.\n\n ```python\n gpu0_spec = DeviceSpec(job=\"ps\", device_type=\"GPU\", device_index=0)\n with tf.device(DeviceSpec(job=\"train\").to_string()):\n with tf.device(gpu0_spec.to_string()):\n # Nodes created here will be assigned to /job:ps/device:GPU:0.\n with tf.device(DeviceSpec(device_type=\"GPU\", device_index=1).to_string()):\n # Nodes created here will be assigned to /job:train/device:GPU:1.\n ```\n\n A `DeviceSpec` consists of 5 components -- each of\n which is optionally specified:\n\n * Job: The job name.\n * Replica: The replica index.\n * Task: The task index.\n * Device type: The device type string (e.g. 
\"CPU\" or \"GPU\").\n * Device index: The device index.\n "}, {"name": "GradientTape", "path": "./tf/GradientTape.md", "desc": "Record operations for automatic differentiation.", "type": "Classes", "docs": "Record operations for automatic differentiation.\n\n Operations are recorded if they are executed within this context manager and\n at least one of their inputs is being \"watched\".\n\n Trainable variables (created by `tf.Variable` or `tf.compat.v1.get_variable`,\n where `trainable=True` is default in both cases) are automatically watched.\n Tensors can be manually watched by invoking the `watch` method on this context\n manager.\n\n For example, consider the function `y = x * x`. The gradient at `x = 3.0` can\n be computed as:\n\n >>> x = tf.constant(3.0)\n >>> with tf.GradientTape() as g:\n ... g.watch(x)\n ... y = x * x\n >>> dy_dx = g.gradient(y, x)\n >>> print(dy_dx)\n tf.Tensor(6.0, shape=(), dtype=float32)\n\n GradientTapes can be nested to compute higher-order derivatives. For example,\n\n >>> x = tf.constant(5.0)\n >>> with tf.GradientTape() as g:\n ... g.watch(x)\n ... with tf.GradientTape() as gg:\n ... gg.watch(x)\n ... y = x * x\n ... dy_dx = gg.gradient(y, x) # dy_dx = 2 * x\n >>> d2y_dx2 = g.gradient(dy_dx, x) # d2y_dx2 = 2\n >>> print(dy_dx)\n tf.Tensor(10.0, shape=(), dtype=float32)\n >>> print(d2y_dx2)\n tf.Tensor(2.0, shape=(), dtype=float32)\n\n By default, the resources held by a GradientTape are released as soon as\n GradientTape.gradient() method is called. To compute multiple gradients over\n the same computation, create a persistent gradient tape. This allows multiple\n calls to the gradient() method as resources are released when the tape object\n is garbage collected. For example:\n\n >>> x = tf.constant(3.0)\n >>> with tf.GradientTape(persistent=True) as g:\n ... g.watch(x)\n ... y = x * x\n ... z = y * y\n >>> dz_dx = g.gradient(z, x) # (4*x^3 at x = 3)\n >>> print(dz_dx)\n tf.Tensor(108.0, shape=(), dtype=float32)\n >>> dy_dx = g.gradient(y, x)\n >>> print(dy_dx)\n tf.Tensor(6.0, shape=(), dtype=float32)\n\n By default GradientTape will automatically watch any trainable variables that\n are accessed inside the context. If you want fine grained control over which\n variables are watched you can disable automatic tracking by passing\n `watch_accessed_variables=False` to the tape constructor:\n\n >>> x = tf.Variable(2.0)\n >>> w = tf.Variable(5.0)\n >>> with tf.GradientTape(\n ... watch_accessed_variables=False, persistent=True) as tape:\n ... tape.watch(x)\n ... y = x ** 2 # Gradients will be available for `x`.\n ... z = w ** 3 # No gradients will be available as `w` isn't being watched.\n >>> dy_dx = tape.gradient(y, x)\n >>> print(dy_dx)\n tf.Tensor(4.0, shape=(), dtype=float32)\n >>> # No gradients will be available as `w` isn't being watched.\n >>> dz_dw = tape.gradient(z, w)\n >>> print(dz_dw)\n None\n\n Note that when using models you should ensure that your variables exist when\n using `watch_accessed_variables=False`. 
Otherwise it's quite easy to make your\n first iteration not have any gradients:\n\n ```python\n a = tf.keras.layers.Dense(32)\n b = tf.keras.layers.Dense(32)\n\n with tf.GradientTape(watch_accessed_variables=False) as tape:\n tape.watch(a.variables) # Since `a.build` has not been called at this point\n # `a.variables` will return an empty list and the\n # tape will not be watching anything.\n result = b(a(inputs))\n tape.gradient(result, a.variables) # The result of this computation will be\n # a list of `None`s since a's variables\n # are not being watched.\n ```\n\n Note that only tensors with real or complex dtypes are differentiable.\n "}, {"name": "Graph", "path": "./tf/Graph.md", "desc": "A TensorFlow computation, represented as a dataflow graph.", "type": "Classes", "docs": "A TensorFlow computation, represented as a dataflow graph.\n\n Graphs are used by `tf.function`s to represent the function's computations.\n Each graph contains a set of `tf.Operation` objects, which represent units of\n computation; and `tf.Tensor` objects, which represent the units of data that\n flow between operations.\n\n ### Using graphs directly (deprecated)\n\n A `tf.Graph` can be constructed and used directly without a `tf.function`, as\n was required in TensorFlow 1, but this is deprecated and it is recommended to\n use a `tf.function` instead. If a graph is directly used, other deprecated\n TensorFlow 1 classes are also required to execute the graph, such as a\n `tf.compat.v1.Session`.\n\n A default graph can be registered with the `tf.Graph.as_default` context\n manager. Then, operations will be added to the graph instead of being executed\n eagerly. For example:\n\n ```python\n g = tf.Graph()\n with g.as_default():\n # Define operations and tensors in `g`.\n c = tf.constant(30.0)\n assert c.graph is g\n ```\n\n `tf.compat.v1.get_default_graph()` can be used to obtain the default graph.\n\n Important note: This class *is not* thread-safe for graph construction. All\n operations should be created from a single thread, or external\n synchronization must be provided. Unless otherwise specified, all methods\n are not thread-safe.\n\n A `Graph` instance supports an arbitrary number of \"collections\"\n that are identified by name. For convenience when building a large\n graph, collections can store groups of related objects: for\n example, the `tf.Variable` uses a collection (named\n `tf.GraphKeys.GLOBAL_VARIABLES`) for\n all variables that are created during the construction of a graph. The caller\n may define additional collections by specifying a new name.\n "}, {"name": "IndexedSlices", "path": "./tf/IndexedSlices.md", "desc": "A sparse representation of a set of tensor slices at given indices.", "type": "Classes", "docs": "A sparse representation of a set of tensor slices at given indices.\n\n This class is a simple wrapper for a pair of `Tensor` objects:\n\n * `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`.\n * `indices`: A 1-D integer `Tensor` with shape `[D0]`.\n\n An `IndexedSlices` is typically used to represent a subset of a larger\n tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`.\n The values in `indices` are the indices in the first dimension of\n the slices that have been extracted from the larger tensor.\n\n The dense tensor `dense` represented by an `IndexedSlices` `slices` has\n\n ```python\n dense[slices.indices[i], :, :, :, ...] 
= slices.values[i, :, :, :, ...]\n ```\n\n The `IndexedSlices` class is used principally in the definition of\n gradients for operations that have sparse gradients\n (e.g. `tf.gather`).\n\n >>> v = tf.Variable([[0.,1, 2], [2, 3, 4], [4, 5, 6], [6, 7, 8]])\n >>> with tf.GradientTape() as tape:\n ... r = tf.gather(v, [1,3])\n >>> index_slices = tape.gradient(r,v)\n >>> index_slices\n <...IndexedSlices object ...>\n >>> index_slices.indices.numpy()\n array([1, 3], dtype=int32)\n >>> index_slices.values.numpy()\n array([[1., 1., 1.],\n [1., 1., 1.]], dtype=float32)\n\n Contrast this representation with\n `tf.sparse.SparseTensor`,\n which uses multi-dimensional indices and scalar values.\n "}, {"name": "IndexedSlicesSpec", "path": "./tf/IndexedSlicesSpec.md", "desc": "Type specification for a tf.IndexedSlices.", "type": "Classes", "docs": "Type specification for a `tf.IndexedSlices`."}, {"name": "Module", "path": "./tf/Module.md", "desc": "Base neural network module class.", "type": "Classes", "docs": "Base neural network module class.\n\n A module is a named container for `tf.Variable`s, other `tf.Module`s and\n functions which apply to user input. For example a dense layer in a neural\n network might be implemented as a `tf.Module`:\n\n >>> class Dense(tf.Module):\n ... def __init__(self, input_dim, output_size, name=None):\n ... super(Dense, self).__init__(name=name)\n ... self.w = tf.Variable(\n ... tf.random.normal([input_dim, output_size]), name='w')\n ... self.b = tf.Variable(tf.zeros([output_size]), name='b')\n ... def __call__(self, x):\n ... y = tf.matmul(x, self.w) + self.b\n ... return tf.nn.relu(y)\n\n You can use the Dense layer as you would expect:\n\n >>> d = Dense(input_dim=3, output_size=2)\n >>> d(tf.ones([1, 3]))\n \n\n\n By subclassing `tf.Module` instead of `object` any `tf.Variable` or\n `tf.Module` instances assigned to object properties can be collected using\n the `variables`, `trainable_variables` or `submodules` property:\n\n >>> d.variables\n (,\n )\n\n\n Subclasses of `tf.Module` can also take advantage of the `_flatten` method\n which can be used to implement tracking of any other types.\n\n All `tf.Module` classes have an associated `tf.name_scope` which can be used\n to group operations in TensorBoard and create hierarchies for variable names\n which can help with debugging. We suggest using the name scope when creating\n nested submodules/parameters or for forward methods whose graph you might want\n to inspect in TensorBoard. You can enter the name scope explicitly using\n `with self.name_scope:` or you can annotate methods (apart from `__init__`)\n with `@tf.Module.with_name_scope`.\n\n >>> class MLP(tf.Module):\n ... def __init__(self, input_size, sizes, name=None):\n ... super(MLP, self).__init__(name=name)\n ... self.layers = []\n ... with self.name_scope:\n ... for size in sizes:\n ... self.layers.append(Dense(input_dim=input_size, output_size=size))\n ... input_size = size\n ... @tf.Module.with_name_scope\n ... def __call__(self, x):\n ... for layer in self.layers:\n ... x = layer(x)\n ... 
return x\n\n >>> module = MLP(input_size=5, sizes=[5, 5])\n >>> module.variables\n (,\n ,\n ,\n )\n "}, {"name": "Operation", "path": "./tf/Operation.md", "desc": "Represents a graph node that performs computation on tensors.", "type": "Classes", "docs": "Represents a graph node that performs computation on tensors.\n\n An `Operation` is a node in a `tf.Graph` that takes zero or more `Tensor`\n objects as input, and produces zero or more `Tensor` objects as output.\n Objects of type `Operation` are created by calling a Python op constructor\n (such as `tf.matmul`) within a `tf.function` or under a `tf.Graph.as_default`\n context manager.\n\n For example, within a `tf.function`, `c = tf.matmul(a, b)` creates an\n `Operation` of type \"MatMul\" that takes tensors `a` and `b` as input, and\n produces `c` as output.\n\n If a `tf.compat.v1.Session` is used, an `Operation` of a `tf.Graph` can be\n executed by passing it to `tf.Session.run`. `op.run()` is a shortcut for\n calling `tf.compat.v1.get_default_session().run(op)`.\n "}, {"name": "OptionalSpec", "path": "./tf/OptionalSpec.md", "desc": "Type specification for tf.experimental.Optional.", "type": "Classes", "docs": "Type specification for `tf.experimental.Optional`.\n\n For instance, `tf.OptionalSpec` can be used to define a tf.function that takes\n `tf.experimental.Optional` as an input argument:\n\n >>> @tf.function(input_signature=[tf.OptionalSpec(\n ... tf.TensorSpec(shape=(), dtype=tf.int32, name=None))])\n ... def maybe_square(optional):\n ... if optional.has_value():\n ... x = optional.get_value()\n ... return x * x\n ... return -1\n >>> optional = tf.experimental.Optional.from_value(5)\n >>> print(maybe_square(optional))\n tf.Tensor(25, shape=(), dtype=int32)\n\n Attributes:\n element_spec: A (nested) structure of `TypeSpec` objects that represents the\n type specification of the optional element.\n "}, {"name": "RaggedTensor", "path": "./tf/RaggedTensor.md", "desc": "Represents a ragged tensor.", "type": "Classes", "docs": "Represents a ragged tensor.\n\n A `RaggedTensor` is a tensor with one or more *ragged dimensions*, which are\n dimensions whose slices may have different lengths. For example, the inner\n (column) dimension of `rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged,\n since the column slices (`rt[0, :]`, ..., `rt[4, :]`) have different lengths.\n Dimensions whose slices all have the same length are called *uniform\n dimensions*. The outermost dimension of a `RaggedTensor` is always uniform,\n since it consists of a single slice (and so there is no possibility for\n differing slice lengths).\n\n The total number of dimensions in a `RaggedTensor` is called its *rank*,\n and the number of ragged dimensions in a `RaggedTensor` is called its\n *ragged-rank*. A `RaggedTensor`'s ragged-rank is fixed at graph creation\n time: it can't depend on the runtime values of `Tensor`s, and can't vary\n dynamically for different session runs.\n\n Note that the `__init__` constructor is private. 
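The `Operation` docs above describe how `c = tf.matmul(a, b)` creates a "MatMul" node, but include no runnable snippet. A minimal sketch under `tf.Graph.as_default` (the tensors are invented for the example; `op.type`, `op.inputs`, and `op.graph` are the standard attributes being inspected):

```python
import tensorflow as tf

# Op constructors called under a graph context create Operation nodes
# that can be inspected rather than executed eagerly.
g = tf.Graph()
with g.as_default():
    a = tf.constant([[1.0, 2.0]])
    b = tf.constant([[3.0], [4.0]])
    c = tf.matmul(a, b)

print(c.op.type)                                  # MatMul
print([t.shape.as_list() for t in c.op.inputs])   # [[1, 2], [2, 1]]
print(c.op.graph is g)                            # True
```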
Please use one of the\n following methods to construct a `RaggedTensor`:\n\n * `tf.RaggedTensor.from_row_lengths`\n * `tf.RaggedTensor.from_value_rowids`\n * `tf.RaggedTensor.from_row_splits`\n * `tf.RaggedTensor.from_row_starts`\n * `tf.RaggedTensor.from_row_limits`\n * `tf.RaggedTensor.from_nested_row_splits`\n * `tf.RaggedTensor.from_nested_row_lengths`\n * `tf.RaggedTensor.from_nested_value_rowids`\n\n ### Potentially Ragged Tensors\n\n Many ops support both `Tensor`s and `RaggedTensor`s\n (see [tf.ragged](https://www.tensorflow.org/api_docs/python/tf/ragged) for a\n full listing). The term \"potentially ragged tensor\" may be used to refer to a\n tensor that might be either a `Tensor` or a `RaggedTensor`. The ragged-rank\n of a `Tensor` is zero.\n\n ### Documenting RaggedTensor Shapes\n\n When documenting the shape of a RaggedTensor, ragged dimensions can be\n indicated by enclosing them in parentheses. For example, the shape of\n a 3-D `RaggedTensor` that stores the fixed-size word embedding for each\n word in a sentence, for each sentence in a batch, could be written as\n `[num_sentences, (num_words), embedding_size]`. The parentheses around\n `(num_words)` indicate that dimension is ragged, and that the length\n of each element list in that dimension may vary for each item.\n\n ### Component Tensors\n\n Internally, a `RaggedTensor` consists of a concatenated list of values that\n are partitioned into variable-length rows. In particular, each `RaggedTensor`\n consists of:\n\n * A `values` tensor, which concatenates the variable-length rows into a\n flattened list. For example, the `values` tensor for\n `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is `[3, 1, 4, 1, 5, 9, 2, 6]`.\n\n * A `row_splits` vector, which indicates how those flattened values are\n divided into rows. In particular, the values for row `rt[i]` are stored\n in the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.\n\n Example:\n\n >>> print(tf.RaggedTensor.from_row_splits(\n ... values=[3, 1, 4, 1, 5, 9, 2, 6],\n ... row_splits=[0, 4, 4, 7, 8, 8]))\n \n\n ### Alternative Row-Partitioning Schemes\n\n In addition to `row_splits`, ragged tensors provide support for five other\n row-partitioning schemes:\n\n * `row_lengths`: a vector with shape `[nrows]`, which specifies the length\n of each row.\n\n * `value_rowids` and `nrows`: `value_rowids` is a vector with shape\n `[nvals]`, corresponding one-to-one with `values`, which specifies\n each value's row index. In particular, the row `rt[row]` consists of the\n values `rt.values[j]` where `value_rowids[j]==row`. `nrows` is an\n integer scalar that specifies the number of rows in the\n `RaggedTensor`. (`nrows` is used to indicate trailing empty rows.)\n\n * `row_starts`: a vector with shape `[nrows]`, which specifies the start\n offset of each row. Equivalent to `row_splits[:-1]`.\n\n * `row_limits`: a vector with shape `[nrows]`, which specifies the stop\n offset of each row. Equivalent to `row_splits[1:]`.\n\n * `uniform_row_length`: A scalar tensor, specifying the length of every\n row. This row-partitioning scheme may only be used if all rows have\n the same length.\n\n Example: The following ragged tensors are equivalent, and all represent the\n nested list `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]`.\n\n >>> values = [3, 1, 4, 1, 5, 9, 2, 6]\n >>> RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8])\n \n >>> RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0])\n \n >>> RaggedTensor.from_value_rowids(\n ... 
values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5)\n \n >>> RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8])\n \n >>> RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8])\n \n >>> RaggedTensor.from_uniform_row_length(values, uniform_row_length=2)\n \n\n ### Multiple Ragged Dimensions\n\n `RaggedTensor`s with multiple ragged dimensions can be defined by using\n a nested `RaggedTensor` for the `values` tensor. Each nested `RaggedTensor`\n adds a single ragged dimension.\n\n >>> inner_rt = RaggedTensor.from_row_splits( # =rt1 from above\n ... values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8])\n >>> outer_rt = RaggedTensor.from_row_splits(\n ... values=inner_rt, row_splits=[0, 3, 3, 5])\n >>> print(outer_rt.to_list())\n [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]\n >>> print(outer_rt.ragged_rank)\n 2\n\n The factory function `RaggedTensor.from_nested_row_splits` may be used to\n construct a `RaggedTensor` with multiple ragged dimensions directly, by\n providing a list of `row_splits` tensors:\n\n >>> RaggedTensor.from_nested_row_splits(\n ... flat_values=[3, 1, 4, 1, 5, 9, 2, 6],\n ... nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8])).to_list()\n [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]\n\n ### Uniform Inner Dimensions\n\n `RaggedTensor`s with uniform inner dimensions can be defined\n by using a multidimensional `Tensor` for `values`.\n\n >>> rt = RaggedTensor.from_row_splits(values=tf.ones([5, 3], tf.int32),\n ... row_splits=[0, 2, 5])\n >>> print(rt.to_list())\n [[[1, 1, 1], [1, 1, 1]],\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]]]\n >>> print(rt.shape)\n (2, None, 3)\n\n ### Uniform Outer Dimensions\n\n `RaggedTensor`s with uniform outer dimensions can be defined by using\n one or more `RaggedTensor` with a `uniform_row_length` row-partitioning\n tensor. For example, a `RaggedTensor` with shape `[2, 2, None]` can be\n constructed with this method from a `RaggedTensor` values with shape\n `[4, None]`:\n\n >>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])\n >>> print(values.shape)\n (4, None)\n >>> rt6 = tf.RaggedTensor.from_uniform_row_length(values, 2)\n >>> print(rt6)\n \n >>> print(rt6.shape)\n (2, 2, None)\n\n Note that `rt6` only contains one ragged dimension (the innermost\n dimension). 
In contrast, if `from_row_splits` is used to construct a similar\n `RaggedTensor`, then that `RaggedTensor` will have two ragged dimensions:\n\n >>> rt7 = tf.RaggedTensor.from_row_splits(values, [0, 2, 4])\n >>> print(rt7.shape)\n (2, None, None)\n\n Uniform and ragged outer dimensions may be interleaved, meaning that a\n tensor with any combination of ragged and uniform dimensions may be created.\n For example, a RaggedTensor `t4` with shape `[3, None, 4, 8, None, 2]` could\n be constructed as follows:\n\n ```python\n t0 = tf.zeros([1000, 2]) # Shape: [1000, 2]\n t1 = RaggedTensor.from_row_lengths(t0, [...]) # [160, None, 2]\n t2 = RaggedTensor.from_uniform_row_length(t1, 8) # [20, 8, None, 2]\n t3 = RaggedTensor.from_uniform_row_length(t2, 4) # [5, 4, 8, None, 2]\n t4 = RaggedTensor.from_row_lengths(t3, [...]) # [3, None, 4, 8, None, 2]\n ```\n\n "}, {"name": "RaggedTensorSpec", "path": "./tf/RaggedTensorSpec.md", "desc": "Type specification for a tf.RaggedTensor.", "type": "Classes", "docs": "Type specification for a `tf.RaggedTensor`."}, {"name": "RegisterGradient", "path": "./tf/RegisterGradient.md", "desc": "A decorator for registering the gradient function for an op type.", "type": "Classes", "docs": "A decorator for registering the gradient function for an op type.\n\n This decorator is only used when defining a new op type. For an op\n with `m` inputs and `n` outputs, the gradient function is a function\n that takes the original `Operation` and `n` `Tensor` objects\n (representing the gradients with respect to each output of the op),\n and returns `m` `Tensor` objects (representing the partial gradients\n with respect to each input of the op).\n\n For example, assuming that operations of type `\"Sub\"` take two\n inputs `x` and `y`, and return a single output `x - y`, the\n following gradient function would be registered:\n\n ```python\n @tf.RegisterGradient(\"Sub\")\n def _sub_grad(unused_op, grad):\n return grad, tf.negative(grad)\n ```\n\n The decorator argument `op_type` is the string type of an\n operation. This corresponds to the `OpDef.name` field for the proto\n that defines the operation.\n "}, {"name": "SparseTensor", "path": "./tf/sparse/SparseTensor.md", "desc": "Represents a sparse tensor.", "type": "Classes", "docs": "Represents a sparse tensor.\n\n TensorFlow represents a sparse tensor as three separate dense tensors:\n `indices`, `values`, and `dense_shape`. In Python, the three tensors are\n collected into a `SparseTensor` class for ease of use. If you have separate\n `indices`, `values`, and `dense_shape` tensors, wrap them in a `SparseTensor`\n object before passing to the ops below.\n\n Concretely, the sparse tensor `SparseTensor(indices, values, dense_shape)`\n comprises the following components, where `N` and `ndims` are the number\n of values and number of dimensions in the `SparseTensor`, respectively:\n\n * `indices`: A 2-D int64 tensor of shape `[N, ndims]`, which specifies the\n indices of the elements in the sparse tensor that contain nonzero values\n (elements are zero-indexed). For example, `indices=[[1,3], [2,4]]` specifies\n that the elements with indexes of [1,3] and [2,4] have nonzero values.\n\n * `values`: A 1-D tensor of any type and shape `[N]`, which supplies the\n values for each element in `indices`. 
For example, given `indices=[[1,3],\n [2,4]]`, the parameter `values=[18, 3.6]` specifies that element [1,3] of\n the sparse tensor has a value of 18, and element [2,4] of the tensor has a\n value of 3.6.\n\n * `dense_shape`: A 1-D int64 tensor of shape `[ndims]`, which specifies the\n dense_shape of the sparse tensor. Takes a list indicating the number of\n elements in each dimension. For example, `dense_shape=[3,6]` specifies a\n two-dimensional 3x6 tensor, `dense_shape=[2,3,4]` specifies a\n three-dimensional 2x3x4 tensor, and `dense_shape=[9]` specifies a\n one-dimensional tensor with 9 elements.\n\n The corresponding dense tensor satisfies:\n\n ```python\n dense.shape = dense_shape\n dense[tuple(indices[i])] = values[i]\n ```\n\n By convention, `indices` should be sorted in row-major order (or equivalently\n lexicographic order on the tuples `indices[i]`). This is not enforced when\n `SparseTensor` objects are constructed, but most ops assume correct ordering.\n If the ordering of sparse tensor `st` is wrong, a fixed version can be\n obtained by calling `tf.sparse.reorder(st)`.\n\n Example: The sparse tensor\n\n ```python\n SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])\n ```\n\n represents the dense tensor\n\n ```python\n [[1, 0, 0, 0]\n [0, 0, 2, 0]\n [0, 0, 0, 0]]\n ```\n "}, {"name": "SparseTensorSpec", "path": "./tf/SparseTensorSpec.md", "desc": "Type specification for a tf.sparse.SparseTensor.", "type": "Classes", "docs": "Type specification for a `tf.sparse.SparseTensor`."}, {"name": "Tensor", "path": "./tf/Tensor.md", "desc": "A tf.Tensor represents a multidimensional array of elements.", "type": "Classes", "docs": "A `tf.Tensor` represents a multidimensional array of elements.\n\n All elements are of a single known data type.\n\n When writing a TensorFlow program, the main object that is\n manipulated and passed around is the `tf.Tensor`.\n\n A `tf.Tensor` has the following properties:\n\n * a single data type (float32, int32, or string, for example)\n * a shape\n\n TensorFlow supports eager execution and graph execution. In eager\n execution, operations are evaluated immediately. In graph\n execution, a computational graph is constructed for later\n evaluation.\n\n TensorFlow defaults to eager execution. In the example below, the\n matrix multiplication results are calculated immediately.\n\n >>> # Compute some values using a Tensor\n >>> c = tf.constant([[1.0, 2.0], [3.0, 4.0]])\n >>> d = tf.constant([[1.0, 1.0], [0.0, 1.0]])\n >>> e = tf.matmul(c, d)\n >>> print(e)\n tf.Tensor(\n [[1. 3.]\n [3. 7.]], shape=(2, 2), dtype=float32)\n\n Note that during eager execution, you may discover your `Tensors` are actually\n of type `EagerTensor`. This is an internal detail, but it does give you\n access to a useful function, `numpy`:\n\n >>> type(e)\n \n >>> print(e.numpy())\n [[1. 3.]\n [3. 7.]]\n\n In TensorFlow, `tf.function`s are a common way to define graph execution.\n\n A Tensor's shape (that is, the rank of the Tensor and the size of\n each dimension) may not always be fully known. 
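The `SparseTensor` docs above show the dense equivalent only as a hand-written matrix. A short sketch (not part of the original docstring) that materializes the same example with `tf.sparse.to_dense`:

```python
import tensorflow as tf

# The example SparseTensor from the docs, converted to its dense form.
st = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]],
                            values=[1, 2],
                            dense_shape=[3, 4])
print(tf.sparse.to_dense(st))
# tf.Tensor(
# [[1 0 0 0]
#  [0 0 2 0]
#  [0 0 0 0]], shape=(3, 4), dtype=int32)
```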
In `tf.function`\n definitions, the shape may only be partially known.\n\n Most operations produce tensors of fully-known shapes if the shapes of their\n inputs are also fully known, but in some cases it's only possible to find the\n shape of a tensor at execution time.\n\n A number of specialized tensors are available: see `tf.Variable`,\n `tf.constant`, `tf.placeholder`, `tf.sparse.SparseTensor`, and\n `tf.RaggedTensor`.\n\n Caution: when constructing a tensor from a numpy array or pandas dataframe\n the underlying buffer may be re-used:\n\n ```python\n a = np.array([1, 2, 3])\n b = tf.constant(a)\n a[0] = 4\n print(b) # tf.Tensor([4 2 3], shape=(3,), dtype=int64)\n ```\n\n Note: this is an implementation detail that is subject to change and users\n should not rely on this behaviour.\n\n For more on Tensors, see the [guide](https://tensorflow.org/guide/tensor).\n\n "}, {"name": "TensorArray", "path": "./tf/TensorArray.md", "desc": "Class wrapping dynamic-sized, per-time-step, write-once Tensor arrays.", "type": "Classes", "docs": "Class wrapping dynamic-sized, per-time-step, write-once Tensor arrays.\n\n This class is meant to be used with dynamic iteration primitives such as\n `while_loop` and `map_fn`. It supports gradient back-propagation via special\n \"flow\" control flow dependencies.\n\n Example 1: Plain reading and writing.\n\n >>> ta = tf.TensorArray(tf.float32, size=0, dynamic_size=True, clear_after_read=False)\n >>> ta = ta.write(0, 10)\n >>> ta = ta.write(1, 20)\n >>> ta = ta.write(2, 30)\n >>>\n >>> ta.read(0)\n \n >>> ta.read(1)\n \n >>> ta.read(2)\n \n >>> ta.stack()\n \n\n Example 2: Fibonacci sequence algorithm that writes in a loop then returns.\n\n >>> @tf.function\n ... def fibonacci(n):\n ... ta = tf.TensorArray(tf.float32, size=0, dynamic_size=True)\n ... ta = ta.unstack([0., 1.])\n ...\n ... for i in range(2, n):\n ... ta = ta.write(i, ta.read(i - 1) + ta.read(i - 2))\n ...\n ... return ta.stack()\n >>>\n >>> fibonacci(7)\n \n\n Example 3: A simple loop interacting with a `tf.Variable`.\n\n >>> v = tf.Variable(1)\n >>> @tf.function\n ... def f(x):\n ... ta = tf.TensorArray(tf.int32, size=0, dynamic_size=True)\n ... for i in tf.range(x):\n ... v.assign_add(i)\n ... ta = ta.write(i, v)\n ... return ta.stack()\n >>> f(5)\n \n "}, {"name": "TensorArraySpec", "path": "./tf/TensorArraySpec.md", "desc": "Type specification for a tf.TensorArray.", "type": "Classes", "docs": "Type specification for a `tf.TensorArray`."}, {"name": "TensorShape", "path": "./tf/TensorShape.md", "desc": "Represents the shape of a `Tensor`.", "type": "Classes", "docs": "Represents the shape of a `Tensor`.\n\n A `TensorShape` represents a possibly-partial shape specification for a\n `Tensor`. It may be one of the following:\n\n * *Fully-known shape:* has a known number of dimensions and a known size\n for each dimension. e.g. `TensorShape([16, 256])`\n * *Partially-known shape:* has a known number of dimensions, and an unknown\n size for one or more dimension. e.g. `TensorShape([None, 256])`\n * *Unknown shape:* has an unknown number of dimensions, and an unknown\n size in all dimensions. e.g. `TensorShape(None)`\n\n If a tensor is produced by an operation of type `\"Foo\"`, its shape\n may be inferred if there is a registered shape function for\n `\"Foo\"`. See [Shape\n functions](https://www.tensorflow.org/guide/create_op#shape_functions_in_c)\n for details of shape functions and how to register them. 
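The `TensorShape` docs above enumerate fully-known, partially-known, and unknown shapes without code. A hedged sketch of the three flavors and a couple of common accessors:

```python
import tensorflow as tf

# The three TensorShape flavors described in the docs.
full    = tf.TensorShape([16, 256])    # fully-known
partial = tf.TensorShape([None, 256])  # partially-known
unknown = tf.TensorShape(None)         # unknown rank

print(partial.rank)                      # 2
print(partial.as_list())                 # [None, 256]
print(partial.is_compatible_with(full))  # True
print(unknown.rank)                      # None
```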
Alternatively,\n you may set the shape explicitly using `tf.Tensor.set_shape`.\n "}, {"name": "TensorSpec", "path": "./tf/TensorSpec.md", "desc": "Describes a tf.Tensor.", "type": "Classes", "docs": "Describes a tf.Tensor.\n\n Metadata for describing the `tf.Tensor` objects accepted or returned\n by some TensorFlow APIs.\n "}, {"name": "TypeSpec", "path": "./tf/TypeSpec.md", "desc": "Specifies a TensorFlow value type.", "type": "Classes", "docs": "Specifies a TensorFlow value type.\n\n A `tf.TypeSpec` provides metadata describing an object accepted or returned\n by TensorFlow APIs. Concrete subclasses, such as `tf.TensorSpec` and\n `tf.RaggedTensorSpec`, are used to describe different value types.\n\n For example, `tf.function`'s `input_signature` argument accepts a list\n (or nested structure) of `TypeSpec`s.\n\n Creating new subclasses of `TypeSpec` (outside of TensorFlow core) is not\n currently supported. In particular, we may make breaking changes to the\n private methods and properties defined by this base class.\n\n Example:\n\n >>> spec = tf.RaggedTensorSpec(shape=[None, None], dtype=tf.int32)\n >>> @tf.function(input_signature=[spec])\n ... def double(x):\n ... return x * 2\n >>> print(double(tf.ragged.constant([[1, 2], [3]])))\n <tf.RaggedTensor [[2, 4], [6]]>\n "}, {"name": "UnconnectedGradients", "path": "./tf/UnconnectedGradients.md", "desc": "Controls how gradient computation behaves when y does not depend on x.", "type": "Classes", "docs": "Controls how gradient computation behaves when y does not depend on x.\n\n The gradient of y with respect to x can be zero in two different ways: there\n could be no differentiable path in the graph connecting x to y (and so we can\n statically prove that the gradient is zero) or it could be that runtime values\n of tensors in a particular execution lead to a gradient of zero (say, if a\n relu unit happens to not be activated). To allow you to distinguish between\n these two cases you can choose what value gets returned for the gradient when\n there is no path in the graph from x to y:\n\n * `NONE`: Indicates that [None] will be returned if there is no path from x\n to y\n * `ZERO`: Indicates that a zero tensor will be returned in the shape of x.\n "}, {"name": "Variable", "path": "./tf/Variable.md", "desc": "See the [variable guide](https://tensorflow.org/guide/variable).", "type": "Classes", "docs": "See the [variable guide](https://tensorflow.org/guide/variable).\n\n A variable maintains shared, persistent state manipulated by a program.\n\n The `Variable()` constructor requires an initial value for the variable, which\n can be a `Tensor` of any type and shape. This initial value defines the type\n and shape of the variable. After construction, the type and shape of the\n variable are fixed. The value can be changed using one of the assign methods.\n\n >>> v = tf.Variable(1.)\n >>> v.assign(2.)\n <tf.Variable ... shape=() dtype=float32, numpy=2.0>\n >>> v.assign_add(0.5)\n <tf.Variable ... shape=() dtype=float32, numpy=2.5>\n\n The `shape` argument to `Variable`'s constructor allows you to construct a\n variable with a less defined shape than its `initial_value`:\n\n >>> v = tf.Variable(1., shape=tf.TensorShape(None))\n >>> v.assign([[1.]])\n <tf.Variable ... shape=<unknown> dtype=float32, numpy=array([[1.]], ...)>\n\n Just like any `Tensor`, variables created with `Variable()` can be used as\n inputs to operations. 
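The `UnconnectedGradients` entry above is one of the few classes in this stretch with no example. A minimal sketch (values invented) showing the default `NONE` behavior versus `ZERO` on `tf.GradientTape.gradient`:

```python
import tensorflow as tf

# What tape.gradient returns when y does not depend on x.
x = tf.constant(1.0)
with tf.GradientTape(persistent=True) as tape:
    tape.watch(x)
    y = tf.constant(2.0) * 3.0  # no differentiable path from x to y

print(tape.gradient(y, x))  # None (the default, tf.UnconnectedGradients.NONE)
print(tape.gradient(
    y, x, unconnected_gradients=tf.UnconnectedGradients.ZERO))
# tf.Tensor(0.0, shape=(), dtype=float32)
```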
Additionally, all the operators overloaded for the\n `Tensor` class are carried over to variables.\n\n >>> w = tf.Variable([[1.], [2.]])\n >>> x = tf.constant([[3., 4.]])\n >>> tf.matmul(w, x)\n \n >>> tf.sigmoid(w + x)\n \n\n When building a machine learning model it is often convenient to distinguish\n between variables holding trainable model parameters and other variables such\n as a `step` variable used to count training steps. To make this easier, the\n variable constructor supports a `trainable=`\n parameter. `tf.GradientTape` watches trainable variables by default:\n\n >>> with tf.GradientTape(persistent=True) as tape:\n ... trainable = tf.Variable(1.)\n ... non_trainable = tf.Variable(2., trainable=False)\n ... x1 = trainable * 2.\n ... x2 = non_trainable * 3.\n >>> tape.gradient(x1, trainable)\n \n >>> assert tape.gradient(x2, non_trainable) is None # Unwatched\n\n Variables are automatically tracked when assigned to attributes of types\n inheriting from `tf.Module`.\n\n >>> m = tf.Module()\n >>> m.v = tf.Variable([1.])\n >>> m.trainable_variables\n (,)\n\n This tracking then allows saving variable values to\n [training checkpoints](https://www.tensorflow.org/guide/checkpoint), or to\n [SavedModels](https://www.tensorflow.org/guide/saved_model) which include\n serialized TensorFlow graphs.\n\n Variables are often captured and manipulated by `tf.function`s. This works the\n same way the un-decorated function would have:\n\n >>> v = tf.Variable(0.)\n >>> read_and_decrement = tf.function(lambda: v.assign_sub(0.1))\n >>> read_and_decrement()\n \n >>> read_and_decrement()\n \n\n Variables created inside a `tf.function` must be owned outside the function\n and be created only once:\n\n >>> class M(tf.Module):\n ... @tf.function\n ... def __call__(self, x):\n ... if not hasattr(self, \"v\"): # Or set self.v to None in __init__\n ... self.v = tf.Variable(x)\n ... return self.v * x\n >>> m = M()\n >>> m(2.)\n \n >>> m(3.)\n \n >>> m.v\n \n\n See the `tf.function` documentation for details.\n "}, {"name": "VariableAggregation", "path": "./tf/VariableAggregation.md", "desc": "Indicates how a distributed variable will be aggregated.", "type": "Classes", "docs": "Indicates how a distributed variable will be aggregated.\n\n `tf.distribute.Strategy` distributes a model by making multiple copies\n (called \"replicas\") acting data-parallel on different elements of the input\n batch. When performing some variable-update operation, say\n `var.assign_add(x)`, in a model, we need to resolve how to combine the\n different values for `x` computed in the different replicas.\n\n * `NONE`: This is the default, giving an error if you use a\n variable-update operation with multiple replicas.\n * `SUM`: Add the updates across replicas.\n * `MEAN`: Take the arithmetic mean (\"average\") of the updates across replicas.\n * `ONLY_FIRST_REPLICA`: This is for when every replica is performing the same\n update, but we only want to perform the update once. Used, e.g., for the\n global step counter.\n "}, {"name": "VariableSynchronization", "path": "./tf/VariableSynchronization.md", "desc": "Indicates when a distributed variable will be synced.", "type": "Classes", "docs": "Indicates when a distributed variable will be synced.\n\n * `AUTO`: Indicates that the synchronization will be determined by the current\n `DistributionStrategy` (eg. 
With `MirroredStrategy` this would be\n `ON_WRITE`).\n * `NONE`: Indicates that there will only be one copy of the variable, so\n there is no need to sync.\n * `ON_WRITE`: Indicates that the variable will be updated across devices\n every time it is written.\n * `ON_READ`: Indicates that the variable will be aggregated across devices\n when it is read (eg. when checkpointing or when evaluating an op that uses\n the variable).\n\n Example:\n >>> temp_grad=[tf.Variable([0.], trainable=False,\n ... synchronization=tf.VariableSynchronization.ON_READ,\n ... aggregation=tf.VariableAggregation.MEAN\n ... )]\n "}, {"name": "constant_initializer", "path": "./tf/constant_initializer.md", "desc": "Initializer that generates tensors with constant values.", "type": "Classes", "docs": "Initializer that generates tensors with constant values.\n\n Initializers allow you to pre-specify an initialization strategy, encoded in\n the Initializer object, without knowing the shape and dtype of the variable\n being initialized.\n\n `tf.constant_initializer` returns an object which when called returns a tensor\n populated with the `value` specified in the constructor. This `value` must be\n convertible to the requested `dtype`.\n\n The argument `value` can be a scalar constant value, or a list of\n values. Scalars broadcast to whichever shape is requested from the\n initializer.\n\n If `value` is a list, then the length of the list must be equal to the number\n of elements implied by the desired shape of the tensor. If the total number of\n elements in `value` is not equal to the number of elements required by the\n tensor shape, the initializer will raise a `TypeError`.\n\n Examples:\n\n >>> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.constant_initializer(2.))\n >>> v1\n \n >>> v2\n \n >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))\n (, >> value = [0, 1, 2, 3, 4, 5, 6, 7]\n >>> init = tf.constant_initializer(value)\n >>> # Fitting shape\n >>> tf.Variable(init(shape=[2, 4], dtype=tf.float32))\n \n >>> # Larger shape\n >>> tf.Variable(init(shape=[3, 4], dtype=tf.float32))\n Traceback (most recent call last):\n ...\n TypeError: ...value has 8 elements, shape is (3, 4) with 12 elements...\n >>> # Smaller shape\n >>> tf.Variable(init(shape=[2, 3], dtype=tf.float32))\n Traceback (most recent call last):\n ...\n TypeError: ...value has 8 elements, shape is (2, 3) with 6 elements...\n\n Args:\n value: A Python scalar, list or tuple of values, or a N-dimensional numpy\n array. 
All elements of the initialized variable will be set to the\n corresponding value in the `value` argument.\n\n Raises:\n TypeError: If the input `value` is not one of the expected types.\n "}, {"name": "name_scope", "path": "./tf/name_scope.md", "desc": "A context manager for use when defining a Python op.", "type": "Classes", "docs": "A context manager for use when defining a Python op.\n\n This context manager pushes a name scope, which will make the name of all\n operations added within it have a prefix.\n\n For example, to define a new Python op called `my_op`:\n\n ```python\n def my_op(a, b, c, name=None):\n with tf.name_scope(\"MyOp\") as scope:\n a = tf.convert_to_tensor(a, name=\"a\")\n b = tf.convert_to_tensor(b, name=\"b\")\n c = tf.convert_to_tensor(c, name=\"c\")\n # Define some computation that uses `a`, `b`, and `c`.\n return foo_op(..., name=scope)\n ```\n\n When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`,\n and `MyOp/c`.\n\n Inside a `tf.function`, if the scope name already exists, the name will be\n made unique by appending `_n`. For example, calling `my_op` the second time\n will generate `MyOp_1/a`, etc.\n "}, {"name": "ones_initializer", "path": "./tf/ones_initializer.md", "desc": "Initializer that generates tensors initialized to 1.", "type": "Classes", "docs": "Initializer that generates tensors initialized to 1.\n\n Initializers allow you to pre-specify an initialization strategy, encoded in\n the Initializer object, without knowing the shape and dtype of the variable\n being initialized.\n\n Examples:\n\n >>> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.ones_initializer())\n >>> v1\n \n >>> v2\n \n >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))\n (, >> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3,\n ... tf.random_normal_initializer(mean=1., stddev=2.))\n >>> v1\n \n >>> v2\n >> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))\n (, >> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.ones_initializer())\n >>> v1\n \n >>> v2\n \n >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))\n (, >> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.zeros_initializer())\n >>> v1\n \n >>> v2\n \n >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))\n (, >> # real number\n >>> x = tf.constant([-2.25, 3.25])\n >>> tf.abs(x)\n \n\n >>> # complex number\n >>> x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])\n >>> tf.abs(x)\n \n\n Args:\n x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,\n `int32`, `int64`, `complex64` or `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` of the same size, type and sparsity as `x`,\n with absolute values. 
Note, for `complex64` or `complex128` input, the\n returned `Tensor` will be of type `float32` or `float64`, respectively.\n\n If `x` is a `SparseTensor`, returns\n `SparseTensor(x.indices, tf.math.abs(x.values, ...), x.dense_shape)`"}, {"name": "acos", "path": "./tf/math/acos.md", "desc": "Computes acos of x element-wise.", "type": "Functions", "docs": "Computes acos of x element-wise.\n\n Provided an input tensor, the `tf.math.acos` operation\n returns the inverse cosine of each element of the tensor.\n If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`.\n\n Input range is `[-1, 1]` and the output has a range of `[0, pi]`.\n\n For example:\n\n >>> x = tf.constant([1.0, -0.5, 3.4, 0.2, 0.0, -2], dtype = tf.float32)\n >>> tf.math.acos(x)\n \n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`,\n `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`,\n `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as x.\n "}, {"name": "acosh", "path": "./tf/math/acosh.md", "desc": "Computes inverse hyperbolic cosine of x element-wise.", "type": "Functions", "docs": "Computes inverse hyperbolic cosine of x element-wise.\n\n Given an input tensor, the function computes inverse hyperbolic cosine of every element.\n Input range is `[1, inf]`. It returns `nan` if the input lies outside the range.\n\n ```python\n x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float(\"inf\")])\n tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf]\n ```\n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n "}, {"name": "add", "path": "./tf/math/add.md", "desc": "Returns x + y element-wise.", "type": "Functions", "docs": "Returns x + y element-wise.\n\n Example usages below.\n\n Add a scalar and a list:\n\n >>> x = [1, 2, 3, 4, 5]\n >>> y = 1\n >>> tf.add(x, y)\n \n\n Note that binary `+` operator can be used instead:\n\n >>> x = tf.convert_to_tensor([1, 2, 3, 4, 5])\n >>> y = tf.convert_to_tensor(1)\n >>> x + y\n \n\n Add a tensor and a list of same shape:\n\n >>> x = [1, 2, 3, 4, 5]\n >>> y = tf.constant([1, 2, 3, 4, 5])\n >>> tf.add(x, y)\n \n\n **Warning**: If one of the inputs (`x` or `y`) is a tensor and the other is a\n non-tensor, the non-tensor input will adopt (or get casted to) the data type\n of the tensor input. This can potentially cause unwanted overflow or underflow\n conversion.\n\n For example,\n\n >>> x = tf.constant([1, 2], dtype=tf.int8)\n >>> y = [2**7 + 1, 2**7 + 2]\n >>> tf.add(x, y)\n \n\n When adding two input values of different shapes, `Add` follows NumPy\n broadcasting rules. The two input array shapes are compared element-wise.\n Starting with the trailing dimensions, the two dimensions either have to be\n equal or one of them needs to be `1`.\n\n For example,\n\n >>> x = np.ones(6).reshape(1, 2, 1, 3)\n >>> y = np.ones(6).reshape(2, 1, 3, 1)\n >>> tf.add(x, y).shape.as_list()\n [2, 2, 3, 3]\n\n Another example with two arrays of different dimension.\n\n >>> x = np.ones([1, 2, 1, 4])\n >>> y = np.ones([3, 4])\n >>> tf.add(x, y).shape.as_list()\n [1, 2, 3, 4]\n\n The reduction version of this elementwise operation is `tf.math.reduce_sum`\n\n Args:\n x: A `tf.Tensor`. 
Must be one of the following types: bfloat16, half,\n    float32, float64, uint8, int8, int16, int32, int64, complex64, complex128,\n    string.\n  y: A `tf.Tensor`. Must have the same type as x.\n  name: A name for the operation (optional).\n  "}, {"name": "add_n", "path": "./tf/math/add_n.md", "desc": "Adds all input tensors element-wise.", "type": "Functions", "docs": "Adds all input tensors element-wise.\n\n  `tf.math.add_n` performs the same operation as `tf.math.accumulate_n`.\n\n  This op does not [broadcast](\n  https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html)\n  its inputs. If you need broadcasting, use `tf.math.add` (or the `+` operator)\n  instead.\n\n  For example:\n\n  >>> a = tf.constant([[3, 5], [4, 8]])\n  >>> b = tf.constant([[1, 6], [2, 9]])\n  >>> tf.math.add_n([a, b, a])\n  <tf.Tensor: shape=(2, 2), dtype=int32, numpy=\n  array([[ 7, 16],\n         [10, 25]], dtype=int32)>\n\n  Args:\n    inputs: A list of `tf.Tensor` or `tf.IndexedSlices` objects, each with the\n      same shape and type. `tf.IndexedSlices` objects will be converted into\n      dense tensors prior to adding.\n    name: A name for the operation (optional).\n\n  Returns:\n    A `tf.Tensor` of the same shape and type as the elements of `inputs`.\n\n  Raises:\n    ValueError: If `inputs` don't all have the same shape and dtype or the shape\n    cannot be inferred.\n  "}, {"name": "argmax", "path": "./tf/math/argmax.md", "desc": "Returns the index with the largest value across axes of a tensor.", "type": "Functions", "docs": "Returns the index with the largest value across axes of a tensor.\n\n  In case of ties, returns the smallest index.\n\n  For example:\n\n  >>> A = tf.constant([2, 20, 30, 3, 6])\n  >>> tf.math.argmax(A)  # A[2] is maximum in tensor A\n  <tf.Tensor: shape=(), dtype=int64, numpy=2>\n  >>> B = tf.constant([[2, 20, 30, 3, 6], [3, 11, 16, 1, 8],\n  ...                  [14, 45, 23, 5, 27]])\n  >>> tf.math.argmax(B, 0)\n  <tf.Tensor: shape=(5,), dtype=int64, numpy=array([2, 2, 0, 2, 2])>\n  >>> tf.math.argmax(B, 1)\n  <tf.Tensor: shape=(3,), dtype=int64, numpy=array([2, 2, 1])>\n  >>> C = tf.constant([0, 0, 0, 0])\n  >>> tf.math.argmax(C)  # Returns smallest index in case of ties\n  <tf.Tensor: shape=(), dtype=int64, numpy=0>\n\n  Args:\n    input: A `Tensor`.\n    axis: An integer, the axis to reduce across. Defaults to 0.\n    output_type: An optional output dtype (`tf.int32` or `tf.int64`). Defaults\n      to `tf.int64`.\n    name: An optional name for the operation.\n\n  Returns:\n    A `Tensor` of type `output_type`.\n  "}, {"name": "argmin", "path": "./tf/math/argmin.md", "desc": "Returns the index with the smallest value across axes of a tensor.", "type": "Functions", "docs": "Returns the index with the smallest value across axes of a tensor.\n\n  Returns the smallest index in case of ties.\n\n  Args:\n    input: A `Tensor`. Must be one of the following types: `float32`, `float64`,\n      `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`,\n      `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`,\n      `uint64`.\n    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n      int32 or int64, must be in the range `[-rank(input), rank(input))`.\n      Describes which axis of the input Tensor to reduce across. For vectors,\n      use axis = 0.\n    output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. 
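A short sketch contrasting the broadcasting `tf.add` with the non-broadcasting `tf.math.add_n`, assuming TensorFlow 2.x as `tf`:

```python
import numpy as np
import tensorflow as tf

# tf.add follows NumPy broadcasting: trailing dims must match or be 1.
x = np.ones([1, 2, 1, 4])
y = np.ones([3, 4])
print(tf.add(x, y).shape.as_list())       # [1, 2, 3, 4]

# tf.math.add_n does NOT broadcast: every input must share one shape.
a = tf.constant([[3, 5], [4, 8]])
b = tf.constant([[1, 6], [2, 9]])
print(tf.math.add_n([a, b, a]).numpy())   # [[ 7 16]
                                          #  [10 25]]
```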
Defaults to\n `tf.int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `output_type`.\n\n Usage:\n ```python\n import tensorflow as tf\n a = [1, 10, 26.9, 2.8, 166.32, 62.3]\n b = tf.math.argmin(input = a)\n c = tf.keras.backend.eval(b)\n # c = 0\n # here a[0] = 1 which is the smallest element of a across axis 0\n ```\n "}, {"name": "argsort", "path": "./tf/argsort.md", "desc": "Returns the indices of a tensor that give its sorted order along an axis.", "type": "Functions", "docs": "Returns the indices of a tensor that give its sorted order along an axis.\n\n >>> values = [1, 10, 26.9, 2.8, 166.32, 62.3]\n >>> sort_order = tf.argsort(values)\n >>> sort_order.numpy()\n array([0, 3, 1, 2, 5, 4], dtype=int32)\n\n For a 1D tensor:\n\n >>> sorted = tf.gather(values, sort_order)\n >>> assert tf.reduce_all(sorted == tf.sort(values))\n\n For higher dimensions, the output has the same shape as\n `values`, but along the given axis, values represent the index of the sorted\n element in that slice of the tensor at the given position.\n\n >>> mat = [[30,20,10],\n ... [20,10,30],\n ... [10,30,20]]\n >>> indices = tf.argsort(mat)\n >>> indices.numpy()\n array([[2, 1, 0],\n [1, 0, 2],\n [0, 2, 1]], dtype=int32)\n\n If `axis=-1` these indices can be used to apply a sort using `tf.gather`:\n\n >>> tf.gather(mat, indices, batch_dims=-1).numpy()\n array([[10, 20, 30],\n [10, 20, 30],\n [10, 20, 30]], dtype=int32)\n\n See also:\n\n * `tf.sort`: Sort along an axis.\n * `tf.math.top_k`: A partial sort that returns a fixed number of top values\n and corresponding indices.\n\n Args:\n values: 1-D or higher **numeric** `Tensor`.\n axis: The axis along which to sort. The default is -1, which sorts the last\n axis.\n direction: The direction in which to sort the values (`'ASCENDING'` or\n `'DESCENDING'`).\n stable: If True, equal elements in the original tensor will not be\n re-ordered in the returned order. Unstable sort is not yet implemented,\n but will eventually be the default for performance reasons. If you require\n a stable order, pass `stable=True` for forwards compatibility.\n name: Optional name for the operation.\n\n Returns:\n An int32 `Tensor` with the same shape as `values`. The indices that would\n sort each slice of the given `values` along the given `axis`.\n\n Raises:\n ValueError: If axis is not a constant scalar, or the direction is invalid.\n tf.errors.InvalidArgumentError: If the `values.dtype` is not a `float` or\n `int` type.\n "}, {"name": "as_dtype", "path": "./tf/dtypes/as_dtype.md", "desc": "Converts the given `type_value` to a `DType`.", "type": "Functions", "docs": "Converts the given `type_value` to a `DType`.\n\n Note: `DType` values are interned. When passed a new `DType` object,\n `as_dtype` always returns the interned value.\n\n Args:\n type_value: A value that can be converted to a `tf.DType` object. 
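The `argmin` usage block above goes through `tf.keras.backend.eval`, a TF1-era idiom. An equivalent eager sketch, assuming TensorFlow 2.x as `tf`:

```python
import tensorflow as tf

a = tf.constant([1, 10, 26.9, 2.8, 166.32, 62.3])
print(tf.math.argmin(a).numpy())           # 0 -> a[0] == 1.0 is the smallest
print(tf.math.argmax(a).numpy())           # 4 -> a[4] == 166.32 is the largest

b = tf.constant([[2, 20], [3, 1]])
print(tf.math.argmin(b, axis=1).numpy())   # [0 1], one index per row
```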
This may\n    currently be a `tf.DType` object, a [`DataType`\n    enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto),\n    a string type name, or a [`numpy.dtype`](https://numpy.org/doc/stable/reference/generated/numpy.dtype.html).\n\n  Returns:\n    A `DType` corresponding to `type_value`.\n\n  Raises:\n    TypeError: If `type_value` cannot be converted to a `DType`.\n  "}, {"name": "as_string", "path": "./tf/strings/as_string.md", "desc": "Converts each entry in the given tensor to strings.", "type": "Functions", "docs": "Converts each entry in the given tensor to strings.\n\n  Supports many numeric types and boolean.\n\n  For Unicode, see the\n  [Working with Unicode text](https://www.tensorflow.org/tutorials/representation/unicode)\n  tutorial.\n\n  Examples:\n\n  >>> tf.strings.as_string([3, 2])\n  <tf.Tensor: shape=(2,), dtype=string, numpy=array([b'3', b'2'], dtype=object)>\n  >>> tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy()\n  array([b'3.14', b'2.72'], dtype=object)\n\n  Args:\n    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`, `complex64`, `complex128`, `bool`, `variant`.\n    precision: An optional `int`. Defaults to `-1`.\n      The post-decimal precision to use for floating point numbers.\n      Only used if precision > -1.\n    scientific: An optional `bool`. Defaults to `False`.\n      Use scientific notation for floating point numbers.\n    shortest: An optional `bool`. Defaults to `False`.\n      Use shortest representation (either scientific or standard) for\n      floating point numbers.\n    width: An optional `int`. Defaults to `-1`.\n      Pad pre-decimal numbers to this width.\n      Applies to both floating point and integer numbers.\n      Only used if width > -1.\n    fill: An optional `string`. Defaults to `\"\"`.\n      The value to pad if width > -1. If empty, pads with spaces.\n      Another typical value is '0'. String cannot be longer than 1 character.\n    name: A name for the operation (optional).\n\n  Returns:\n    A `Tensor` of type `string`.\n  "}, {"name": "asin", "path": "./tf/math/asin.md", "desc": "Computes the trigonometric inverse sine of x element-wise.", "type": "Functions", "docs": "Computes the trigonometric inverse sine of x element-wise.\n\n  The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that\n  if `y = tf.math.sin(x)` then, `x = tf.math.asin(y)`.\n\n  **Note**: The output of `tf.math.asin` will lie within the invertible range\n  of sine, i.e. [-pi/2, pi/2].\n\n  For example:\n\n  ```python\n  # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]\n  x = tf.constant([1.047, 0.785])\n  y = tf.math.sin(x) # [0.8659266, 0.7068252]\n\n  tf.math.asin(y) # [1.047, 0.785] = x\n  ```\n\n  Args:\n    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`.\n    name: A name for the operation (optional).\n\n  Returns:\n    A `Tensor`. Has the same type as `x`.\n  "}, {"name": "asinh", "path": "./tf/math/asinh.md", "desc": "Computes inverse hyperbolic sine of x element-wise.", "type": "Functions", "docs": "Computes inverse hyperbolic sine of x element-wise.\n\n  Given an input tensor, this function computes inverse hyperbolic sine\n  for every element in the tensor. Both input and output have a range of\n  `[-inf, inf]`.\n\n  ```python\n  x = tf.constant([-float(\"inf\"), -2, -0.5, 1, 1.2, 200, 10000, float(\"inf\")])\n  tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf]\n  ```\n\n  Args:\n    x: A `Tensor`. 
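A sketch of the `tf.strings.as_string` formatting knobs described above (precision, width, fill), assuming TensorFlow 2.x as `tf`; output comments are illustrative:

```python
import tensorflow as tf

x = tf.constant([3.1415926, 2.71828])
print(tf.strings.as_string(x, precision=2).numpy())   # [b'3.14' b'2.72']

# width pads the pre-decimal part; fill='0' zero-pads instead of spaces.
print(tf.strings.as_string([7, 42], width=4, fill='0').numpy())
# [b'0007' b'0042']
```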
Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n "}, {"name": "assert_equal", "path": "./tf/debugging/assert_equal.md", "desc": "Assert the condition `x == y` holds element-wise.", "type": "Functions", "docs": "Assert the condition `x == y` holds element-wise.\n\n This Op checks that `x[i] == y[i]` holds for every pair of (possibly\n broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is\n trivially satisfied.\n\n If `x` and `y` are not equal, `message`, as well as the first `summarize`\n entries of `x` and `y` are printed, and `InvalidArgumentError` is raised.\n\n Args:\n x: Numeric `Tensor`.\n y: Numeric `Tensor`, same dtype as and broadcastable to `x`.\n message: A string to prefix to the default message.\n summarize: Print this many entries of each tensor.\n name: A name for this operation (optional). Defaults to \"assert_equal\".\n\n Returns:\n Op that raises `InvalidArgumentError` if `x == y` is False. This can be\n used with `tf.control_dependencies` inside of `tf.function`s to block\n followup computation until the check has executed.\n @compatibility(eager)\n returns None\n @end_compatibility\n\n Raises:\n InvalidArgumentError: if the check can be performed immediately and\n `x == y` is False. The check can be performed immediately during eager\n execution or if `x` and `y` are statically known.\n "}, {"name": "assert_greater", "path": "./tf/debugging/assert_greater.md", "desc": "Assert the condition `x > y` holds element-wise.", "type": "Functions", "docs": "Assert the condition `x > y` holds element-wise.\n\n This Op checks that `x[i] > y[i]` holds for every pair of (possibly\n broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is\n trivially satisfied.\n\n If `x` is not greater than `y` element-wise, `message`, as well as the first\n `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError` is\n raised.\n\n Args:\n x: Numeric `Tensor`.\n y: Numeric `Tensor`, same dtype as and broadcastable to `x`.\n message: A string to prefix to the default message.\n summarize: Print this many entries of each tensor.\n name: A name for this operation (optional). Defaults to \"assert_greater\".\n\n Returns:\n Op that raises `InvalidArgumentError` if `x > y` is False. This can be\n used with `tf.control_dependencies` inside of `tf.function`s to block\n followup computation until the check has executed.\n @compatibility(eager)\n returns None\n @end_compatibility\n\n Raises:\n InvalidArgumentError: if the check can be performed immediately and\n `x > y` is False. The check can be performed immediately during eager\n execution or if `x` and `y` are statically known.\n "}, {"name": "assert_less", "path": "./tf/debugging/assert_less.md", "desc": "Assert the condition `x < y` holds element-wise.", "type": "Functions", "docs": "Assert the condition `x < y` holds element-wise.\n\n This Op checks that `x[i] < y[i]` holds for every pair of (possibly\n broadcast) elements of `x` and `y`. 
If both `x` and `y` are empty, this is\n trivially satisfied.\n\n If `x` is not less than `y` element-wise, `message`, as well as the first\n `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError` is\n raised.\n\n Args:\n x: Numeric `Tensor`.\n y: Numeric `Tensor`, same dtype as and broadcastable to `x`.\n message: A string to prefix to the default message.\n summarize: Print this many entries of each tensor.\n name: A name for this operation (optional). Defaults to \"assert_less\".\n\n Returns:\n Op that raises `InvalidArgumentError` if `x < y` is False.\n This can be used with `tf.control_dependencies` inside of `tf.function`s\n to block followup computation until the check has executed.\n @compatibility(eager)\n returns None\n @end_compatibility\n\n Raises:\n InvalidArgumentError: if the check can be performed immediately and\n `x < y` is False. The check can be performed immediately during eager\n execution or if `x` and `y` are statically known.\n "}, {"name": "assert_rank", "path": "./tf/debugging/assert_rank.md", "desc": "Assert that `x` has rank equal to `rank`.", "type": "Functions", "docs": "Assert that `x` has rank equal to `rank`.\n\n This Op checks that the rank of `x` is equal to `rank`.\n\n If `x` has a different rank, `message`, as well as the shape of `x` are\n printed, and `InvalidArgumentError` is raised.\n\n Args:\n x: `Tensor`.\n rank: Scalar integer `Tensor`.\n message: A string to prefix to the default message.\n name: A name for this operation (optional). Defaults to\n \"assert_rank\".\n\n Returns:\n Op raising `InvalidArgumentError` unless `x` has specified rank.\n If static checks determine `x` has correct rank, a `no_op` is returned.\n This can be used with `tf.control_dependencies` inside of `tf.function`s\n to block followup computation until the check has executed.\n @compatibility(eager)\n returns None\n @end_compatibility\n\n Raises:\n InvalidArgumentError: if the check can be performed immediately and\n `x` does not have rank `rank`. The check can be performed immediately\n during eager execution or if the shape of `x` is statically known.\n "}, {"name": "atan", "path": "./tf/math/atan.md", "desc": "Computes the trignometric inverse tangent of x element-wise.", "type": "Functions", "docs": "Computes the trignometric inverse tangent of x element-wise.\n\n The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that\n if `y = tf.math.tan(x)` then, `x = tf.math.atan(y)`.\n\n **Note**: The output of `tf.math.atan` will lie within the invertible range\n of tan, i.e (-pi/2, pi/2).\n\n For example:\n\n ```python\n # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]\n x = tf.constant([1.047, 0.785])\n y = tf.math.tan(x) # [1.731261, 0.99920404]\n\n tf.math.atan(y) # [1.047, 0.785] = x\n ```\n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
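A sketch of how the `tf.debugging.assert_*` return values combine with `tf.control_dependencies` inside a `tf.function`, and how the same checks raise immediately in eager mode; assumes TensorFlow 2.x as `tf`:

```python
import tensorflow as tf

@tf.function
def scaled_sum(x, y):
    check = tf.debugging.assert_equal(
        tf.shape(x), tf.shape(y), message="shapes must match")
    # Block the computation below until the check has executed.
    with tf.control_dependencies([check]):
        return 2.0 * x + y

print(scaled_sum(tf.ones([3]), tf.ones([3])).numpy())   # [3. 3. 3.]

try:
    # Eagerly, a failed check raises InvalidArgumentError right away.
    tf.debugging.assert_rank(tf.ones([2, 2]), 1)
except tf.errors.InvalidArgumentError:
    print("rank check failed")
```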
Has the same type as `x`.\n "}, {"name": "atan2", "path": "./tf/math/atan2.md", "desc": "Computes arctangent of `y/x` element-wise, respecting signs of the arguments.", "type": "Functions", "docs": "Computes arctangent of `y/x` element-wise, respecting signs of the arguments.\n\n This is the angle \\\\( \\theta \\in [-\\pi, \\pi] \\\\) such that\n \\\\[ x = r \\cos(\\theta) \\\\]\n and\n \\\\[ y = r \\sin(\\theta) \\\\]\n where \\\\(r = \\sqrt{x^2 + y^2} \\\\).\n\n For example:\n\n >>> x = [1., 1.]\n >>> y = [1., -1.]\n >>> print((tf.math.atan2(y,x) * (180 / np.pi)).numpy())\n [ 45. -45.]\n\n Args:\n y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.\n x: A `Tensor`. Must have the same type as `y`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `y`.\n "}, {"name": "atanh", "path": "./tf/math/atanh.md", "desc": "Computes inverse hyperbolic tangent of x element-wise.", "type": "Functions", "docs": "Computes inverse hyperbolic tangent of x element-wise.\n\n Given an input tensor, this function computes inverse hyperbolic tangent\n for every element in the tensor. Input range is `[-1,1]` and output range is\n `[-inf, inf]`. If input is `-1`, output will be `-inf` and if the\n input is `1`, output will be `inf`. Values outside the range will have\n `nan` as output.\n\n ```python\n x = tf.constant([-float(\"inf\"), -1, -0.5, 1, 0, 0.5, 10, float(\"inf\")])\n tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan]\n ```\n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n "}, {"name": "batch_to_space", "path": "./tf/batch_to_space.md", "desc": "BatchToSpace for N-D tensors of type T.", "type": "Functions", "docs": "BatchToSpace for N-D tensors of type T.\n\n This operation reshapes the \"batch\" dimension 0 into `M + 1` dimensions of\n shape `block_shape + [batch]`, interleaves these blocks back into the grid\n defined by the spatial dimensions `[1, ..., M]`, to obtain a result with the\n same rank as the input. The spatial dimensions of this intermediate result\n are then optionally cropped according to `crops` to produce the output. This\n is the reverse of SpaceToBatch (see `tf.space_to_batch`).\n\n Args:\n input: A N-D `Tensor` with shape `input_shape = [batch] + spatial_shape +\n remaining_shape`, where `spatial_shape` has M dimensions.\n block_shape: A 1-D `Tensor` with shape [M]. Must be one of the following\n types: `int32`, `int64`. All values must be >= 1. For backwards\n compatibility with TF 1.0, this parameter may be an int, in which case it\n is converted to\n `numpy.array([block_shape, block_shape],\n dtype=numpy.int64)`.\n crops: A 2-D `Tensor` with shape `[M, 2]`. Must be one of the\n following types: `int32`, `int64`. All values must be >= 0.\n `crops[i] = [crop_start, crop_end]` specifies the amount to crop from\n input dimension `i + 1`, which corresponds to spatial dimension `i`.\n It is required that\n `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.\n This operation is equivalent to the following steps:\n 1. Reshape `input` to `reshaped` of shape: [block_shape[0], ...,\n block_shape[M-1], batch / prod(block_shape), input_shape[1], ...,\n input_shape[N-1]]\n 2. 
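A quadrant demo for `tf.math.atan2`, which keeps the sign information that a plain `atan(y/x)` would lose; assumes TensorFlow 2.x as `tf`:

```python
import numpy as np
import tensorflow as tf

y = tf.constant([1.0,  1.0, -1.0, -1.0])
x = tf.constant([1.0, -1.0,  1.0, -1.0])
deg = tf.math.atan2(y, x) * (180.0 / np.pi)
print(deg.numpy())   # [  45.  135.  -45. -135.], one angle per quadrant
```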
Permute dimensions of `reshaped` to produce `permuted` of shape\n [batch / prod(block_shape), input_shape[1], block_shape[0], ...,\n input_shape[M], block_shape[M-1], input_shape[M+1],\n ..., input_shape[N-1]]\n 3. Reshape `permuted` to produce `reshaped_permuted` of shape\n [batch / prod(block_shape), input_shape[1] * block_shape[0], ...,\n input_shape[M] * block_shape[M-1], input_shape[M+1], ...,\n input_shape[N-1]]\n 4. Crop the start and end of dimensions `[1, ..., M]` of\n `reshaped_permuted` according to `crops` to produce the output\n of shape:\n [batch / prod(block_shape), input_shape[1] *\n block_shape[0] - crops[0,0] - crops[0,1], ..., input_shape[M] *\n block_shape[M-1] - crops[M-1,0] - crops[M-1,1], input_shape[M+1],\n ..., input_shape[N-1]]\n name: A name for the operation (optional).\n\n Examples:\n\n 1. For the following input of shape `[4, 1, 1, 1]`,\n `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:\n\n ```python\n [[[[1]]],\n [[[2]]],\n [[[3]]],\n [[[4]]]]\n ```\n\n The output tensor has shape `[1, 2, 2, 1]` and value:\n\n ```\n x = [[[[1], [2]],\n [[3], [4]]]]\n ```\n\n 2. For the following input of shape `[4, 1, 1, 3]`,\n `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:\n\n ```python\n [[[1, 2, 3]],\n [[4, 5, 6]],\n [[7, 8, 9]],\n [[10, 11, 12]]]\n ```\n\n The output tensor has shape `[1, 2, 2, 3]` and value:\n\n ```python\n x = [[[[1, 2, 3], [4, 5, 6 ]],\n [[7, 8, 9], [10, 11, 12]]]]\n ```\n\n 3. For the following\n input of shape `[4, 2, 2, 1]`,\n `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:\n\n ```python\n x = [[[[1], [3]], [[ 9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n ```\n\n The output tensor has shape `[1, 4, 4, 1]` and value:\n\n ```python\n x = [[[1], [2], [ 3], [ 4]],\n [[5], [6], [ 7], [ 8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]\n ```\n\n 4. For the following input of shape\n `[8, 1, 3, 1]`,\n `block_shape = [2, 2]`, and `crops = [[0, 0], [2, 0]]`:\n\n ```python\n x = [[[[0], [ 1], [ 3]]],\n [[[0], [ 9], [11]]],\n [[[0], [ 2], [ 4]]],\n [[[0], [10], [12]]],\n [[[0], [ 5], [ 7]]],\n [[[0], [13], [15]]],\n [[[0], [ 6], [ 8]]],\n [[[0], [14], [16]]]]\n ```\n\n The output tensor has shape `[2, 2, 4, 1]` and value:\n\n ```python\n x = [[[[ 1], [ 2], [ 3], [ 4]],\n [[ 5], [ 6], [ 7], [ 8]]],\n [[[ 9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n ```\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n "}, {"name": "bitcast", "path": "./tf/bitcast.md", "desc": "Bitcasts a tensor from one type to another without copying data.", "type": "Functions", "docs": "Bitcasts a tensor from one type to another without copying data.\n\n Given a tensor `input`, this operation returns a tensor that has the same buffer\n data as `input` with datatype `type`.\n\n If the input datatype `T` is larger than the output datatype `type` then the\n shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].\n\n If `T` is smaller than `type`, the operator requires that the rightmost\n dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from\n [..., sizeof(`type`)/sizeof(`T`)] to [...].\n\n tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype\n (e.g. 
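Example 1 above, rerun as an eager sketch to confirm the shape arithmetic; assumes TensorFlow 2.x as `tf`:

```python
import tensorflow as tf

x = tf.constant([[[[1]]], [[[2]]], [[[3]]], [[[4]]]])   # shape [4, 1, 1, 1]
out = tf.batch_to_space(x, block_shape=[2, 2], crops=[[0, 0], [0, 0]])
print(out.shape)                 # (1, 2, 2, 1): batch 4 folded into a 2x2 grid
print(tf.squeeze(out).numpy())   # [[1 2]
                                 #  [3 4]]
```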
tf.complex64 or tf.complex128) as tf.cast() makes the imaginary part 0 while tf.bitcast()\n raises an error.\n For example,\n\n Example 1:\n\n >>> a = [1., 2., 3.]\n >>> equality_bitcast = tf.bitcast(a, tf.complex128)\n Traceback (most recent call last):\n ...\n InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast]\n >>> equality_cast = tf.cast(a, tf.complex128)\n >>> print(equality_cast)\n tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128)\n\n Example 2:\n\n >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8)\n <tf.Tensor: shape=(4,), dtype=uint8, numpy=array([255, 255, 255, 255], dtype=uint8)>\n\n Example 3:\n\n >>> x = [1., 2., 3.]\n >>> y = [0., 2., 3.]\n >>> equality = tf.equal(x, y)\n >>> equality_cast = tf.cast(equality, tf.float32)\n >>> equality_bitcast = tf.bitcast(equality_cast, tf.uint8)\n >>> print(equality)\n tf.Tensor([False True True], shape=(3,), dtype=bool)\n >>> print(equality_cast)\n tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32)\n >>> print(equality_bitcast)\n tf.Tensor(\n [[ 0 0 0 0]\n [ 0 0 128 63]\n [ 0 0 128 63]], shape=(3, 4), dtype=uint8)\n\n *NOTE*: Bitcast is implemented as a low-level cast, so machines with different\n endian orderings will give different results.\n\n Args:\n input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `complex64`, `complex128`, `qint8`, `quint8`, `qint16`, `quint16`, `qint32`.\n type: A `tf.DType` from: `tf.bfloat16, tf.half, tf.float32, tf.float64, tf.int64, tf.int32, tf.uint8, tf.uint16, tf.uint32, tf.uint64, tf.int8, tf.int16, tf.complex64, tf.complex128, tf.qint8, tf.quint8, tf.qint16, tf.quint16, tf.qint32`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `type`.\n "}, {"name": "boolean_mask", "path": "./tf/boolean_mask.md", "desc": "Apply boolean mask to tensor.", "type": "Functions", "docs": "Apply boolean mask to tensor.\n\n Numpy equivalent is `tensor[mask]`.\n\n In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match\n the first K dimensions of `tensor`'s shape. We then have:\n `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`\n where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).\n The `axis` could be used with `mask` to indicate the axis to mask from.\n In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match\n the first `axis + dim(mask)` dimensions of `tensor`'s shape.\n\n See also: `tf.ragged.boolean_mask`, which can be applied to both dense and\n ragged tensors, and can be used if you need to preserve the masked dimensions\n of `tensor` (rather than flattening them, as `tf.boolean_mask` does).\n\n Examples:\n\n >>> tensor = [0, 1, 2, 3]  # 1-D example\n >>> mask = np.array([True, False, True, False])\n >>> tf.boolean_mask(tensor, mask)\n <tf.Tensor: shape=(2,), dtype=int32, numpy=array([0, 2], dtype=int32)>\n\n >>> tensor = [[1, 2], [3, 4], [5, 6]]  # 2-D example\n >>> mask = np.array([True, False, True])\n >>> tf.boolean_mask(tensor, mask)\n <tf.Tensor: shape=(2, 2), dtype=int32, numpy=\n array([[1, 2],\n        [5, 6]], dtype=int32)>\n\n Args:\n tensor: N-D Tensor.\n mask: K-D boolean Tensor, K <= N and K must be known statically.\n axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By\n default, axis is 0 which will mask from the first dimension. 
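A sketch separating the two ideas above: `tf.bitcast` reinterprets the underlying bytes, while `tf.cast` converts the numeric value; assumes TensorFlow 2.x as `tf`:

```python
import tensorflow as tf

# Reinterpret the 4 bytes of a uint32 scalar as 4 uint8 values.
u = tf.constant(0xFFFFFFFF, dtype=tf.uint32)
print(tf.bitcast(u, tf.uint8).numpy())    # [255 255 255 255]

# The IEEE-754 bit pattern of float32 1.0 is 0x3F800000.
one = tf.constant(1.0, dtype=tf.float32)
print(tf.bitcast(one, tf.int32).numpy())  # 1065353216 == 0x3F800000
```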
Otherwise K +\n axis <= N.\n name: A name for this operation (optional).\n\n Returns:\n (N-K+1)-dimensional tensor populated by entries in `tensor` corresponding\n to `True` values in `mask`.\n\n Raises:\n ValueError: If shapes do not conform.\n\n Examples:\n\n ```python\n # 2-D example\n tensor = [[1, 2], [3, 4], [5, 6]]\n mask = np.array([True, False, True])\n boolean_mask(tensor, mask) # [[1, 2], [5, 6]]\n ```\n "}, {"name": "broadcast_dynamic_shape", "path": "./tf/broadcast_dynamic_shape.md", "desc": "Computes the shape of a broadcast given symbolic shapes.", "type": "Functions", "docs": "Computes the shape of a broadcast given symbolic shapes.\n\n When `shape_x` and `shape_y` are Tensors representing shapes (i.e. the result\n of calling tf.shape on another Tensor) this computes a Tensor which is the\n shape of the result of a broadcasting op applied in tensors of shapes\n `shape_x` and `shape_y`.\n\n This is useful when validating the result of a broadcasting operation when the\n tensors do not have statically known shapes.\n\n Example:\n\n >>> shape_x = (1, 2, 3)\n >>> shape_y = (5, 1, 3)\n >>> tf.broadcast_dynamic_shape(shape_x, shape_y)\n \n\n Args:\n shape_x: A rank 1 integer `Tensor`, representing the shape of x.\n shape_y: A rank 1 integer `Tensor`, representing the shape of y.\n\n Returns:\n A rank 1 integer `Tensor` representing the broadcasted shape.\n\n Raises:\n InvalidArgumentError: If the two shapes are incompatible for\n broadcasting.\n "}, {"name": "broadcast_static_shape", "path": "./tf/broadcast_static_shape.md", "desc": "Computes the shape of a broadcast given known shapes.", "type": "Functions", "docs": "Computes the shape of a broadcast given known shapes.\n\n When `shape_x` and `shape_y` are fully known `TensorShape`s this computes a\n `TensorShape` which is the shape of the result of a broadcasting op applied in\n tensors of shapes `shape_x` and `shape_y`.\n\n For example, if shape_x is `TensorShape([1, 2, 3])` and shape_y is\n `TensorShape([5, 1, 3])`, the result is a TensorShape whose value is\n `TensorShape([5, 2, 3])`.\n\n This is useful when validating the result of a broadcasting operation when the\n tensors have statically known shapes.\n\n Example:\n\n >>> shape_x = tf.TensorShape([1, 2, 3])\n >>> shape_y = tf.TensorShape([5, 1 ,3])\n >>> tf.broadcast_static_shape(shape_x, shape_y)\n TensorShape([5, 2, 3])\n\n Args:\n shape_x: A `TensorShape`\n shape_y: A `TensorShape`\n\n Returns:\n A `TensorShape` representing the broadcasted shape.\n\n Raises:\n ValueError: If the two shapes can not be broadcasted.\n "}, {"name": "broadcast_to", "path": "./tf/broadcast_to.md", "desc": "Broadcast an array for a compatible shape.", "type": "Functions", "docs": "Broadcast an array for a compatible shape.\n\n Broadcasting is the process of making arrays to have compatible shapes\n for arithmetic operations. Two shapes are compatible if for each\n dimension pair they are either equal or one of them is one. 
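A sketch of the `axis` argument to `tf.boolean_mask`, which the examples above do not cover; assumes TensorFlow 2.x as `tf`:

```python
import numpy as np
import tensorflow as tf

tensor = tf.reshape(tf.range(12), [2, 3, 2])
mask = np.array([True, False, True])       # length 3 matches axis 1

out = tf.boolean_mask(tensor, mask, axis=1)
print(out.shape)        # (2, 2, 2): axis 1 keeps 2 of its 3 slices
print(out.numpy()[0])   # [[0 1]
                        #  [4 5]]
```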
When trying\n to broadcast a Tensor to a shape, it starts with the trailing dimensions,\n and works its way forward.\n\n For example,\n\n >>> x = tf.constant([1, 2, 3])\n >>> y = tf.broadcast_to(x, [3, 3])\n >>> print(y)\n tf.Tensor(\n [[1 2 3]\n [1 2 3]\n [1 2 3]], shape=(3, 3), dtype=int32)\n\n In the above example, the input Tensor with the shape of `[1, 3]`\n is broadcasted to output Tensor with shape of `[3, 3]`.\n\n When doing broadcasted operations such as multiplying a tensor\n by a scalar, broadcasting (usually) confers some time or space\n benefit, as the broadcasted tensor is never materialized.\n\n However, `broadcast_to` does not carry with it any such benefits.\n The newly-created tensor takes the full memory of the broadcasted\n shape. (In a graph context, `broadcast_to` might be fused to\n subsequent operation and then be optimized away, however.)\n\n Args:\n input: A `Tensor`. A Tensor to broadcast.\n shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n An 1-D `int` Tensor. The shape of the desired output.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n "}, {"name": "case", "path": "./tf/case.md", "desc": "Create a case operation.", "type": "Functions", "docs": "Create a case operation.\n\n See also `tf.switch_case`.\n\n The `pred_fn_pairs` parameter is a list of pairs of size N.\n Each pair contains a boolean scalar tensor and a python callable that\n creates the tensors to be returned if the boolean evaluates to True.\n `default` is a callable generating a list of tensors. All the callables\n in `pred_fn_pairs` as well as `default` (if provided) should return the same\n number and types of tensors.\n\n If `exclusive==True`, all predicates are evaluated, and an exception is\n thrown if more than one of the predicates evaluates to `True`.\n If `exclusive==False`, execution stops at the first predicate which\n evaluates to True, and the tensors generated by the corresponding function\n are returned immediately. If none of the predicates evaluate to True, this\n operation returns the tensors generated by `default`.\n\n `tf.case` supports nested structures as implemented in\n `tf.nest`. All of the callables must return the same (possibly nested) value\n structure of lists, tuples, and/or named tuples. Singleton lists and tuples\n form the only exceptions to this: when returned by a callable, they are\n implicitly unpacked to single values. This behavior is disabled by passing\n `strict=True`.\n\n @compatibility(v2)\n `pred_fn_pairs` could be a dictionary in v1. However, tf.Tensor and\n tf.Variable are no longer hashable in v2, so cannot be used as a key for a\n dictionary. 
Please use a list or a tuple instead.\n @end_compatibility\n\n\n **Example 1:**\n\n Pseudocode:\n\n ```\n if (x < y) return 17;\n else return 23;\n ```\n\n Expressions:\n\n ```python\n f1 = lambda: tf.constant(17)\n f2 = lambda: tf.constant(23)\n r = tf.case([(tf.less(x, y), f1)], default=f2)\n ```\n\n **Example 2:**\n\n Pseudocode:\n\n ```\n if (x < y && x > z) raise OpError(\"Only one predicate may evaluate to True\");\n if (x < y) return 17;\n else if (x > z) return 23;\n else return -1;\n ```\n\n Expressions:\n\n ```python\n def f1(): return tf.constant(17)\n def f2(): return tf.constant(23)\n def f3(): return tf.constant(-1)\n r = tf.case([(tf.less(x, y), f1), (tf.greater(x, z), f2)],\n default=f3, exclusive=True)\n ```\n\n Args:\n pred_fn_pairs: List of pairs of a boolean scalar tensor and a callable which\n returns a list of tensors.\n default: Optional callable that returns a list of tensors.\n exclusive: True iff at most one predicate is allowed to evaluate to `True`.\n strict: A boolean that enables/disables 'strict' mode; see above.\n name: A name for this operation (optional).\n\n Returns:\n The tensors returned by the first pair whose predicate evaluated to True, or\n those returned by `default` if none does.\n\n Raises:\n TypeError: If `pred_fn_pairs` is not a list/tuple.\n TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.\n TypeError: If `fns[i]` is not callable for any i, or `default` is not\n callable.\n "}, {"name": "cast", "path": "./tf/cast.md", "desc": "Casts a tensor to a new type.", "type": "Functions", "docs": "Casts a tensor to a new type.\n\n The operation casts `x` (in case of `Tensor`) or `x.values`\n (in case of `SparseTensor` or `IndexedSlices`) to `dtype`.\n\n For example:\n\n >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)\n >>> tf.cast(x, tf.int32)\n \n\n Notice `tf.cast` has an alias `tf.dtypes.cast`:\n\n >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)\n >>> tf.dtypes.cast(x, tf.int32)\n \n\n The operation supports data types (for `x` and `dtype`) of\n `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,\n `float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`.\n In case of casting from complex types (`complex64`, `complex128`) to real\n types, only the real part of `x` is returned. In case of casting from real\n types to complex types (`complex64`, `complex128`), the imaginary part of the\n returned value is set to `0`. The handling of complex types here matches the\n behavior of numpy.\n\n Note casting nan and inf values to integral types has undefined behavior.\n\n Args:\n x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It could\n be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`,\n `int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`,\n `bfloat16`.\n dtype: The destination type. 
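An eager sketch of the non-exclusive `tf.case` semantics described above (evaluation stops at the first true predicate); assumes TensorFlow 2.x as `tf`:

```python
import tensorflow as tf

x, y, z = tf.constant(1), tf.constant(2), tf.constant(0)

r = tf.case(
    [(tf.less(x, y), lambda: tf.constant(17)),
     (tf.greater(x, z), lambda: tf.constant(23))],
    default=lambda: tf.constant(-1))
print(r.numpy())   # 17: the first predicate (x < y) is already True
```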
The list of supported dtypes is the same as\n `x`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and\n same type as `dtype`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `dtype`.\n "}, {"name": "clip_by_global_norm", "path": "./tf/clip_by_global_norm.md", "desc": "Clips values of multiple tensors by the ratio of the sum of their norms.", "type": "Functions", "docs": "Clips values of multiple tensors by the ratio of the sum of their norms.\n\n Given a tuple or list of tensors `t_list`, and a clipping ratio `clip_norm`,\n this operation returns a list of clipped tensors `list_clipped`\n and the global norm (`global_norm`) of all tensors in `t_list`. Optionally,\n if you've already computed the global norm for `t_list`, you can specify\n the global norm with `use_norm`.\n\n To perform the clipping, the values `t_list[i]` are set to:\n\n t_list[i] * clip_norm / max(global_norm, clip_norm)\n\n where:\n\n global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))\n\n If `clip_norm > global_norm` then the entries in `t_list` remain as they are,\n otherwise they're all shrunk by the global ratio.\n\n If `global_norm == infinity` then the entries in `t_list` are all set to `NaN`\n to signal that an error occurred.\n\n Any of the entries of `t_list` that are of type `None` are ignored.\n\n This is the correct way to perform gradient clipping (Pascanu et al., 2012).\n\n However, it is slower than `clip_by_norm()` because all the parameters must be\n ready before the clipping operation can be performed.\n\n Args:\n t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.\n clip_norm: A 0-D (scalar) `Tensor` > 0. The clipping ratio.\n use_norm: A 0-D (scalar) `Tensor` of type `float` (optional). The global\n norm to use. If not provided, `global_norm()` is used to compute the norm.\n name: A name for the operation (optional).\n\n Returns:\n list_clipped: A list of `Tensors` of the same type as `list_t`.\n global_norm: A 0-D (scalar) `Tensor` representing the global norm.\n\n Raises:\n TypeError: If `t_list` is not a sequence.\n\n References:\n On the difficulty of training Recurrent Neural Networks:\n [Pascanu et al., 2012](http://proceedings.mlr.press/v28/pascanu13.html)\n ([pdf](http://proceedings.mlr.press/v28/pascanu13.pdf))\n "}, {"name": "clip_by_norm", "path": "./tf/clip_by_norm.md", "desc": "Clips tensor values to a maximum L2-norm.", "type": "Functions", "docs": "Clips tensor values to a maximum L2-norm.\n\n Given a tensor `t`, and a maximum clip value `clip_norm`, this operation\n normalizes `t` so that its L2-norm is less than or equal to `clip_norm`,\n along the dimensions given in `axes`. Specifically, in the default case\n where all dimensions are used for calculation, if the L2-norm of `t` is\n already less than or equal to `clip_norm`, then `t` is not modified. If\n the L2-norm is greater than `clip_norm`, then this operation returns a\n tensor of the same type and shape as `t` with its values set to:\n\n `t * clip_norm / l2norm(t)`\n\n In this case, the L2-norm of the output tensor is `clip_norm`.\n\n As another example, if `t` is a matrix and `axes == [1]`, then each row\n of the output will have L2-norm less than or equal to `clip_norm`. 
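A numeric sketch of `tf.clip_by_global_norm`, checking the formula above on a small gradient list; assumes TensorFlow 2.x as `tf` (`tf.linalg.global_norm` recomputes the norm):

```python
import tensorflow as tf

grads = [tf.constant([3.0, 4.0]), tf.constant([6.0, 8.0])]
# global_norm = sqrt(3^2 + 4^2 + 6^2 + 8^2) = sqrt(125) ~= 11.18
clipped, gnorm = tf.clip_by_global_norm(grads, clip_norm=5.0)
print(gnorm.numpy())                           # ~11.18
print(tf.linalg.global_norm(clipped).numpy())  # ~5.0 after rescaling
```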
If\n `axes == [0]` instead, each column of the output will be clipped.\n\n Code example:\n\n >>> some_nums = tf.constant([[1, 2, 3, 4, 5]], dtype=tf.float32)\n >>> tf.clip_by_norm(some_nums, 2.0).numpy()\n array([[0.26967996, 0.5393599 , 0.80903983, 1.0787199 , 1.3483998 ]],\n dtype=float32)\n\n This operation is typically used to clip gradients before applying them with\n an optimizer. Most gradient data is a collection of different shaped tensors\n for different parts of the model. Thus, this is a common usage:\n\n ```\n # Get your gradients after training\n loss_value, grads = grad(model, features, labels)\n\n # Apply some clipping\n grads = [tf.clip_by_norm(g, norm)\n for g in grads]\n\n # Continue on with training\n optimizer.apply_gradients(grads)\n ```\n\n Args:\n t: A `Tensor` or `IndexedSlices`. This must be a floating point type.\n clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value, also\n floating point\n axes: A 1-D (vector) `Tensor` of type int32 containing the dimensions\n to use for computing the L2-norm. If `None` (the default), uses all\n dimensions.\n name: A name for the operation (optional).\n\n Returns:\n A clipped `Tensor` or `IndexedSlices`.\n\n Raises:\n ValueError: If the clip_norm tensor is not a 0-D scalar tensor.\n TypeError: If dtype of the input is not a floating point or\n complex type.\n "}, {"name": "clip_by_value", "path": "./tf/clip_by_value.md", "desc": "Clips tensor values to a specified min and max.", "type": "Functions", "docs": "Clips tensor values to a specified min and max.\n\n Given a tensor `t`, this operation returns a tensor of the same type and\n shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.\n Any values less than `clip_value_min` are set to `clip_value_min`. Any values\n greater than `clip_value_max` are set to `clip_value_max`.\n\n Note: `clip_value_min` needs to be smaller or equal to `clip_value_max` for\n correct results.\n\n For example:\n\n Basic usage passes a scalar as the min and max value.\n\n >>> t = tf.constant([[-10., -1., 0.], [0., 2., 10.]])\n >>> t2 = tf.clip_by_value(t, clip_value_min=-1, clip_value_max=1)\n >>> t2.numpy()\n array([[-1., -1., 0.],\n [ 0., 1., 1.]], dtype=float32)\n\n The min and max can be the same size as `t`, or broadcastable to that size.\n\n >>> t = tf.constant([[-1, 0., 10.], [-1, 0, 10]])\n >>> clip_min = [[2],[1]]\n >>> t3 = tf.clip_by_value(t, clip_value_min=clip_min, clip_value_max=100)\n >>> t3.numpy()\n array([[ 2., 2., 10.],\n [ 1., 1., 10.]], dtype=float32)\n\n Broadcasting fails, intentionally, if you would expand the dimensions of `t`\n\n >>> t = tf.constant([[-1, 0., 10.], [-1, 0, 10]])\n >>> clip_min = [[[2, 1]]] # Has a third axis\n >>> t4 = tf.clip_by_value(t, clip_value_min=clip_min, clip_value_max=100)\n Traceback (most recent call last):\n ...\n InvalidArgumentError: Incompatible shapes: [2,3] vs. [1,1,2]\n\n It throws a `TypeError` if you try to clip an `int` to a `float` value\n (`tf.cast` the input to `float` first).\n\n >>> t = tf.constant([[1, 2], [3, 4]], dtype=tf.int32)\n >>> t5 = tf.clip_by_value(t, clip_value_min=-3.1, clip_value_max=3.1)\n Traceback (most recent call last):\n ...\n TypeError: Cannot convert ...\n\n\n Args:\n t: A `Tensor` or `IndexedSlices`.\n clip_value_min: The minimum value to clip to. A scalar `Tensor` or one that\n is broadcastable to the shape of `t`.\n clip_value_max: The maximum value to clip to. 
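A sketch of per-row clipping with `axes=[1]`, matching the matrix case described above; assumes TensorFlow 2.x as `tf`:

```python
import tensorflow as tf

t = tf.constant([[3.0, 4.0], [6.0, 8.0]])
rows = tf.clip_by_norm(t, clip_norm=5.0, axes=[1])
print(rows.numpy())
# [[3. 4.]    row norm was already 5.0, left unchanged
#  [3. 4.]]   row norm 10.0 rescaled by 5/10
```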
A scalar `Tensor` or one that\n is broadcastable to the shape of `t`.\n name: A name for the operation (optional).\n\n Returns:\n A clipped `Tensor` or `IndexedSlices`.\n\n Raises:\n `tf.errors.InvalidArgumentError`: If the clip tensors would trigger array\n broadcasting that would make the returned tensor larger than the input.\n TypeError: If dtype of the input is `int32` and dtype of\n the `clip_value_min` or `clip_value_max` is `float32`\n "}, {"name": "complex", "path": "./tf/dtypes/complex.md", "desc": "Converts two real numbers to a complex number.", "type": "Functions", "docs": "Converts two real numbers to a complex number.\n\n Given a tensor `real` representing the real part of a complex number, and a\n tensor `imag` representing the imaginary part of a complex number, this\n operation returns complex numbers elementwise of the form \\\\(a + bj\\\\), where\n *a* represents the `real` part and *b* represents the `imag` part.\n\n The input tensors `real` and `imag` must have the same shape.\n\n For example:\n\n ```python\n real = tf.constant([2.25, 3.25])\n imag = tf.constant([4.75, 5.75])\n tf.complex(real, imag) # [[2.25 + 4.75j], [3.25 + 5.75j]]\n ```\n\n Args:\n real: A `Tensor`. Must be one of the following types: `float32`, `float64`.\n imag: A `Tensor`. Must have the same type as `real`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `complex64` or `complex128`.\n\n Raises:\n TypeError: Real and imag must be correct types\n "}, {"name": "concat", "path": "./tf/concat.md", "desc": "Concatenates tensors along one dimension.", "type": "Functions", "docs": "Concatenates tensors along one dimension.\n\n See also `tf.tile`, `tf.stack`, `tf.repeat`.\n\n Concatenates the list of tensors `values` along dimension `axis`. If\n `values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated\n result has shape\n\n [D0, D1, ... Raxis, ...Dn]\n\n where\n\n Raxis = sum(Daxis(i))\n\n That is, the data from the input tensors is joined along the `axis`\n dimension.\n\n The number of dimensions of the input tensors must match, and all dimensions\n except `axis` must be equal.\n\n For example:\n\n >>> t1 = [[1, 2, 3], [4, 5, 6]]\n >>> t2 = [[7, 8, 9], [10, 11, 12]]\n >>> tf.concat([t1, t2], 0)\n \n\n >>> tf.concat([t1, t2], 1)\n \n\n As in Python, the `axis` could also be negative numbers. Negative `axis`\n are interpreted as counting from the end of the rank, i.e.,\n `axis + rank(values)`-th dimension.\n\n For example:\n\n >>> t1 = [[[1, 2], [2, 3]], [[4, 4], [5, 3]]]\n >>> t2 = [[[7, 4], [8, 4]], [[2, 10], [15, 11]]]\n >>> tf.concat([t1, t2], -1)\n \n\n Note: If you are concatenating along a new axis consider using stack.\n E.g.\n\n ```python\n tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)\n ```\n\n can be rewritten as\n\n ```python\n tf.stack(tensors, axis=axis)\n ```\n\n Args:\n values: A list of `Tensor` objects or a single `Tensor`.\n axis: 0-D `int32` `Tensor`. Dimension along which to concatenate. Must be\n in the range `[-rank(values), rank(values))`. As in Python, indexing for\n axis is 0-based. Positive axis in the rage of `[0, rank(values))` refers\n to `axis`-th dimension. 
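A sketch of broadcastable clip bounds and of `tf.complex`, assuming TensorFlow 2.x as `tf`:

```python
import tensorflow as tf

t = tf.constant([[-10.0, -1.0, 0.0], [0.0, 2.0, 10.0]])
# A per-row minimum broadcasts against t; the maximum stays a scalar.
print(tf.clip_by_value(t, clip_value_min=[[2.0], [1.0]],
                       clip_value_max=100.0).numpy())
# [[ 2.  2.  2.]
#  [ 1.  2. 10.]]

real = tf.constant([2.25, 3.25])
imag = tf.constant([4.75, 5.75])
print(tf.complex(real, imag).numpy())   # [2.25+4.75j 3.25+5.75j], complex64
```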
And negative axis refers to `axis +\n rank(values)`-th dimension.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` resulting from concatenation of the input tensors.\n "}, {"name": "cond", "path": "./tf/cond.md", "desc": "Return `true_fn(", "type": "Functions", "docs": "Return `true_fn()` if the predicate `pred` is true else `false_fn()`.\n\n `true_fn` and `false_fn` both return lists of output tensors. `true_fn` and\n `false_fn` must have the same non-zero number and type of outputs.\n\n **WARNING**: Any Tensors or Operations created outside of `true_fn` and\n `false_fn` will be executed regardless of which branch is selected at runtime.\n\n Although this behavior is consistent with the dataflow model of TensorFlow,\n it has frequently surprised users who expected a lazier semantics.\n Consider the following simple program:\n\n ```python\n z = tf.multiply(a, b)\n result = tf.cond(x < y, lambda: tf.add(x, z), lambda: tf.square(y))\n ```\n\n If `x < y`, the `tf.add` operation will be executed and `tf.square`\n operation will not be executed. Since `z` is needed for at least one\n branch of the `cond`, the `tf.multiply` operation is always executed,\n unconditionally.\n\n Note that `cond` calls `true_fn` and `false_fn` *exactly once* (inside the\n call to `cond`, and not at all during `Session.run()`). `cond`\n stitches together the graph fragments created during the `true_fn` and\n `false_fn` calls with some additional graph nodes to ensure that the right\n branch gets executed depending on the value of `pred`.\n\n `tf.cond` supports nested structures as implemented in\n `tensorflow.python.util.nest`. Both `true_fn` and `false_fn` must return the\n same (possibly nested) value structure of lists, tuples, and/or named tuples.\n Singleton lists and tuples form the only exceptions to this: when returned by\n `true_fn` and/or `false_fn`, they are implicitly unpacked to single values.\n\n Note: It is illegal to \"directly\" use tensors created inside a cond branch\n outside it, e.g. by storing a reference to a branch tensor in the python\n state. If you need to use a tensor created in a branch function you should\n return it as an output of the branch function and use the output from\n `tf.cond` instead.\n\n Args:\n pred: A scalar determining whether to return the result of `true_fn` or\n `false_fn`.\n true_fn: The callable to be performed if pred is true.\n false_fn: The callable to be performed if pred is false.\n name: Optional name prefix for the returned tensors.\n\n Returns:\n Tensors returned by the call to either `true_fn` or `false_fn`. If the\n callables return a singleton list, the element is extracted from the list.\n\n Raises:\n TypeError: if `true_fn` or `false_fn` is not callable.\n ValueError: if `true_fn` and `false_fn` do not return the same number of\n tensors, or return tensors of different types.\n\n Example:\n\n ```python\n x = tf.constant(2)\n y = tf.constant(5)\n def f1(): return tf.multiply(x, 17)\n def f2(): return tf.add(y, 23)\n r = tf.cond(tf.less(x, y), f1, f2)\n # r is set to f1().\n # Operations in f2 (e.g., tf.add) are not executed.\n ```\n\n "}, {"name": "constant", "path": "./tf/constant.md", "desc": "Creates a constant tensor from a tensor-like object.", "type": "Functions", "docs": "Creates a constant tensor from a tensor-like object.\n\n Note: All eager `tf.Tensor` values are immutable (in contrast to\n `tf.Variable`). There is nothing especially _constant_ about the value\n returned from `tf.constant`. 
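An eager sketch of the `tf.cond` example above; assumes TensorFlow 2.x as `tf`:

```python
import tensorflow as tf

x, y = tf.constant(2), tf.constant(5)
r = tf.cond(tf.less(x, y),
            true_fn=lambda: tf.multiply(x, 17),
            false_fn=lambda: tf.add(y, 23))
print(r.numpy())   # 34: only the true branch's value is returned
```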
This function is not fundamentally different from\n `tf.convert_to_tensor`. The name `tf.constant` comes from the `value` being\n embedded in a `Const` node in the `tf.Graph`. `tf.constant` is useful\n for asserting that the value can be embedded that way.\n\n If the argument `dtype` is not specified, then the type is inferred from\n the type of `value`.\n\n >>> # Constant 1-D Tensor from a python list.\n >>> tf.constant([1, 2, 3, 4, 5, 6])\n \n >>> # Or a numpy array\n >>> a = np.array([[1, 2, 3], [4, 5, 6]])\n >>> tf.constant(a)\n \n\n If `dtype` is specified, the resulting tensor values are cast to the requested\n `dtype`.\n\n >>> tf.constant([1, 2, 3, 4, 5, 6], dtype=tf.float64)\n \n\n If `shape` is set, the `value` is reshaped to match. Scalars are expanded to\n fill the `shape`:\n\n >>> tf.constant(0, shape=(2, 3))\n \n >>> tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])\n \n\n `tf.constant` has no effect if an eager Tensor is passed as the `value`, it\n even transmits gradients:\n\n >>> v = tf.Variable([0.0])\n >>> with tf.GradientTape() as g:\n ... loss = tf.constant(v + v)\n >>> g.gradient(loss, v).numpy()\n array([2.], dtype=float32)\n\n But, since `tf.constant` embeds the value in the `tf.Graph` this fails for\n symbolic tensors:\n\n >>> with tf.compat.v1.Graph().as_default():\n ... i = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.float32)\n ... t = tf.constant(i)\n Traceback (most recent call last):\n ...\n TypeError: ...\n\n `tf.constant` will create tensors on the current device. Inputs which are\n already tensors maintain their placements unchanged.\n\n Related Ops:\n\n * `tf.convert_to_tensor` is similar but:\n * It has no `shape` argument.\n * Symbolic tensors are allowed to pass through.\n\n >>> with tf.compat.v1.Graph().as_default():\n ... i = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.float32)\n ... t = tf.convert_to_tensor(i)\n\n * `tf.fill`: differs in a few ways:\n * `tf.constant` supports arbitrary constants, not just uniform scalar\n Tensors like `tf.fill`.\n * `tf.fill` creates an Op in the graph that is expanded at runtime, so it\n can efficiently represent large tensors.\n * Since `tf.fill` does not embed the value, it can produce dynamically\n sized outputs.\n\n Args:\n value: A constant value (or list) of output type `dtype`.\n dtype: The type of the elements of the resulting tensor.\n shape: Optional dimensions of resulting tensor.\n name: Optional name for the tensor.\n\n Returns:\n A Constant Tensor.\n\n Raises:\n TypeError: if shape is incorrectly specified or unsupported.\n ValueError: if called on a symbolic tensor.\n "}, {"name": "control_dependencies", "path": "./tf/control_dependencies.md", "desc": "Wrapper for Graph.control_dependencies(", "type": "Functions", "docs": "Wrapper for `Graph.control_dependencies()` using the default graph.\n\n See `tf.Graph.control_dependencies` for more details.\n\n Note: *In TensorFlow 2 with eager and/or Autograph, you should not require\n this method, as ops execute in the expected order thanks to automatic control\n dependencies.* Only use `tf.control_dependencies` when working with v1\n `tf.Graph` code.\n\n When eager execution is enabled, any callable object in the `control_inputs`\n list will be called.\n\n Args:\n control_inputs: A list of `Operation` or `Tensor` objects which must be\n executed or computed before running the operations defined in the context.\n Can also be `None` to clear the control dependencies. 
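A sketch of the `tf.constant` / `tf.fill` distinction drawn above; assumes TensorFlow 2.x as `tf`:

```python
import tensorflow as tf

c = tf.constant(0, shape=(2, 3))    # value embedded as a Const node
f = tf.fill((2, 3), 0)              # expanded at runtime instead
print(bool(tf.reduce_all(c == f)))  # True: identical values either way

# A list is reshaped to the requested shape rather than repeated.
print(tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]).numpy())
# [[1 2 3]
#  [4 5 6]]
```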
If eager execution\n is enabled, any callable object in the `control_inputs` list will be\n called.\n\n Returns:\n A context manager that specifies control dependencies for all\n operations constructed within the context.\n "}, {"name": "convert_to_tensor", "path": "./tf/convert_to_tensor.md", "desc": "Converts the given `value` to a `Tensor`.", "type": "Functions", "docs": "Converts the given `value` to a `Tensor`.\n\n This function converts Python objects of various types to `Tensor`\n objects. It accepts `Tensor` objects, numpy arrays, Python lists,\n and Python scalars.\n\n For example:\n\n >>> import numpy as np\n >>> def my_func(arg):\n ... arg = tf.convert_to_tensor(arg, dtype=tf.float32)\n ... return arg\n\n >>> # The following calls are equivalent.\n ...\n >>> value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))\n >>> print(value_1)\n tf.Tensor(\n [[1. 2.]\n [3. 4.]], shape=(2, 2), dtype=float32)\n >>> value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])\n >>> print(value_2)\n tf.Tensor(\n [[1. 2.]\n [3. 4.]], shape=(2, 2), dtype=float32)\n >>> value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))\n >>> print(value_3)\n tf.Tensor(\n [[1. 2.]\n [3. 4.]], shape=(2, 2), dtype=float32)\n\n This function can be useful when composing a new operation in Python\n (such as `my_func` in the example above). All standard Python op\n constructors apply this function to each of their Tensor-valued\n inputs, which allows those ops to accept numpy arrays, Python lists,\n and scalars in addition to `Tensor` objects.\n\n Note: This function diverges from default Numpy behavior for `float` and\n `string` types when `None` is present in a Python list or scalar. Rather\n than silently converting `None` values, an error will be thrown.\n\n Args:\n value: An object whose type has a registered `Tensor` conversion function.\n dtype: Optional element type for the returned tensor. If missing, the type\n is inferred from the type of `value`.\n dtype_hint: Optional element type for the returned tensor, used when dtype\n is None. In some cases, a caller may not have a dtype in mind when\n converting to a tensor, so dtype_hint can be used as a soft preference.\n If the conversion to `dtype_hint` is not possible, this argument has no\n effect.\n name: Optional name to use if a new `Tensor` is created.\n\n Returns:\n A `Tensor` based on `value`.\n\n Raises:\n TypeError: If no conversion function is registered for `value` to `dtype`.\n RuntimeError: If a registered conversion function returns an invalid value.\n ValueError: If the `value` is a tensor not of given `dtype` in graph mode.\n "}, {"name": "cos", "path": "./tf/math/cos.md", "desc": "Computes cos of x element-wise.", "type": "Functions", "docs": "Computes cos of x element-wise.\n\n Given an input tensor, this function computes cosine of every\n element in the tensor. Input range is `(-inf, inf)` and\n output range is `[-1,1]`. If input lies outside the boundary, `nan`\n is returned.\n\n ```python\n x = tf.constant([-float(\"inf\"), -9, -0.5, 1, 1.2, 200, 10000, float(\"inf\")])\n tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan]\n ```\n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
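A sketch of the `dtype_hint` soft preference described above; the exact outcomes are my reading of the docstring, assuming TensorFlow 2.x as `tf`:

```python
import tensorflow as tf

# A Python list CAN be converted to float32, so the hint applies.
t1 = tf.convert_to_tensor([1, 2, 3], dtype_hint=tf.float32)
print(t1.dtype)   # <dtype: 'float32'>

# An existing tensor already has a dtype; the hint has no effect.
t2 = tf.convert_to_tensor(tf.constant([1, 2, 3]), dtype_hint=tf.float32)
print(t2.dtype)   # <dtype: 'int32'>
```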
Has the same type as `x`.\n "}, {"name": "cosh", "path": "./tf/math/cosh.md", "desc": "Computes hyperbolic cosine of x element-wise.", "type": "Functions", "docs": "Computes hyperbolic cosine of x element-wise.\n\n Given an input tensor, this function computes hyperbolic cosine of every\n element in the tensor. Input range is `[-inf, inf]` and output range\n is `[1, inf]`.\n\n ```python\n x = tf.constant([-float(\"inf\"), -9, -0.5, 1, 1.2, 2, 10, float(\"inf\")])\n tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf]\n ```\n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n "}, {"name": "cumsum", "path": "./tf/math/cumsum.md", "desc": "Compute the cumulative sum of the tensor `x` along `axis`.", "type": "Functions", "docs": "Compute the cumulative sum of the tensor `x` along `axis`.\n\n By default, this op performs an inclusive cumsum, which means that the first\n element of the input is identical to the first element of the output:\n For example:\n\n >>> # tf.cumsum([a, b, c]) # [a, a + b, a + b + c]\n >>> x = tf.constant([2, 4, 6, 8])\n >>> tf.cumsum(x)\n \n\n >>> # using varying `axis` values\n >>> y = tf.constant([[2, 4, 6, 8], [1,3,5,7]])\n >>> tf.cumsum(y, axis=0)\n \n >>> tf.cumsum(y, axis=1)\n \n\n By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed\n instead:\n\n >>> # tf.cumsum([a, b, c], exclusive=True) => [0, a, a + b]\n >>> x = tf.constant([2, 4, 6, 8])\n >>> tf.cumsum(x, exclusive=True)\n \n\n By setting the `reverse` kwarg to `True`, the cumsum is performed in the\n opposite direction:\n\n >>> # tf.cumsum([a, b, c], reverse=True) # [a + b + c, b + c, c]\n >>> x = tf.constant([2, 4, 6, 8])\n >>> tf.cumsum(x, reverse=True)\n \n\n This is more efficient than using separate `tf.reverse` ops.\n The `reverse` and `exclusive` kwargs can also be combined:\n\n >>> # tf.cumsum([a, b, c], exclusive=True, reverse=True) # [b + c, c, 0]\n >>> x = tf.constant([2, 4, 6, 8])\n >>> tf.cumsum(x, exclusive=True, reverse=True)\n \n\n Args:\n x: A `Tensor`. Must be one of the following types: `float32`, `float64`,\n `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,\n `complex128`, `qint8`, `quint8`, `qint32`, `half`.\n axis: A `Tensor` of type `int32` (default: 0). Must be in the range\n `[-rank(x), rank(x))`.\n exclusive: If `True`, perform exclusive cumsum.\n reverse: A `bool` (default: False).\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n "}, {"name": "custom_gradient", "path": "./tf/custom_gradient.md", "desc": "Decorator to define a function with a custom gradient.", "type": "Functions", "docs": "Decorator to define a function with a custom gradient.\n\n This decorator allows fine grained control over the gradients of a sequence\n for operations. This may be useful for multiple reasons, including providing\n a more efficient or numerically stable gradient for a sequence of operations.\n\n For example, consider the following function that commonly occurs in the\n computation of cross entropy and log likelihoods:\n\n ```python\n def log1pexp(x):\n return tf.math.log(1 + tf.exp(x))\n ```\n\n Due to numerical instability, the gradient of this function evaluated at x=100\n is NaN. 
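A compact check of the four `tf.cumsum` modes documented above; assumes TensorFlow 2.x as `tf`:

```python
import tensorflow as tf

x = tf.constant([2, 4, 6, 8])
print(tf.cumsum(x).numpy())                                # [ 2  6 12 20]
print(tf.cumsum(x, exclusive=True).numpy())                # [ 0  2  6 12]
print(tf.cumsum(x, reverse=True).numpy())                  # [20 18 14  8]
print(tf.cumsum(x, exclusive=True, reverse=True).numpy())  # [18 14  8  0]
```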
For example:\n\n ```python\n x = tf.constant(100.)\n y = log1pexp(x)\n dy_dx = tf.gradients(y, x) # Will be NaN when evaluated.\n ```\n\n The gradient expression can be analytically simplified to provide numerical\n stability:\n\n ```python\n @tf.custom_gradient\n def log1pexp(x):\n e = tf.exp(x)\n def grad(upstream):\n return upstream * (1 - 1 / (1 + e))\n return tf.math.log(1 + e), grad\n ```\n\n With this definition, the gradient `dy_dx` at `x = 100` will be correctly\n evaluated as 1.0.\n\n The variable `upstream` is defined as the upstream gradient, i.e. the gradient\n from all the layers or functions originating from this layer. The above\n example has no upstream functions, therefore `upstream = dy/dy = 1.0`.\n\n Assume that `x_i` is `log1pexp` in the forward pass `x_1 = x_1(x_0)`,\n `x_2 = x_2(x_1)`, ..., `x_i = x_i(x_i-1)`, ..., `x_n = x_n(x_n-1)`. By\n chain rule we know that `dx_n/dx_0 = dx_n/dx_n-1 * dx_n-1/dx_n-2 * ... *\n dx_i/dx_i-1 * ... * dx_1/dx_0`.\n\n In this case the gradient of our current function is defined as\n `dx_i/dx_i-1 = (1 - 1 / (1 + e))`. The upstream gradient `upstream` would be\n `dx_n/dx_n-1 * dx_n-1/dx_n-2 * ... * dx_i+1/dx_i`. The upstream gradient\n multiplied by the current gradient is then passed downstream.\n\n If the function takes multiple variables as input, the `grad`\n function must also return the same number of gradients.\n We take the function `z = x * y` as an example.\n\n >>> @tf.custom_gradient\n ... def bar(x, y):\n ... def grad(upstream):\n ... dz_dx = y\n ... dz_dy = x\n ... return upstream * dz_dx, upstream * dz_dy\n ... z = x * y\n ... return z, grad\n >>> x = tf.constant(2.0, dtype=tf.float32)\n >>> y = tf.constant(3.0, dtype=tf.float32)\n >>> with tf.GradientTape(persistent=True) as tape:\n ... tape.watch(x)\n ... tape.watch(y)\n ... z = bar(x, y)\n >>> z\n \n >>> tape.gradient(z, x)\n \n >>> tape.gradient(z, y)\n \n\n Nesting custom gradients can lead to unintuitive results. The default\n behavior does not correspond to n-th order derivatives. For example\n\n ```python\n @tf.custom_gradient\n def op(x):\n y = op1(x)\n @tf.custom_gradient\n def grad_fn(dy):\n gdy = op2(x, y, dy)\n def grad_grad_fn(ddy): # Not the 2nd order gradient of op w.r.t. x.\n return op3(x, y, dy, ddy)\n return gdy, grad_grad_fn\n return y, grad_fn\n ```\n\n The function `grad_grad_fn` will calculate the first-order gradient\n of `grad_fn` with respect to `dy`, which is used to generate forward-mode\n gradient graphs from backward-mode gradient graphs, but is not the same as\n the second-order gradient of `op` with respect to `x`.\n\n Instead, wrap nested `@tf.custom_gradient`s in another function:\n\n ```python\n @tf.custom_gradient\n def op_with_fused_backprop(x):\n y, x_grad = fused_op(x)\n def first_order_gradient(dy):\n @tf.custom_gradient\n def first_order_custom(unused_x):\n def second_order_and_transpose(ddy):\n return second_order_for_x(...), gradient_wrt_dy(...)\n return x_grad, second_order_and_transpose\n return dy * first_order_custom(x)\n return y, first_order_gradient\n ```\n\n Additional arguments to the inner `@tf.custom_gradient`-decorated function\n control the expected return values of the innermost function.\n\n The examples above illustrate how to specify custom gradients for functions\n which do not read from variables. 
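As a quick end-to-end check, here is a minimal runnable sketch (assuming TF 2.x eager execution) that evaluates the stabilized gradient above at a large input, where the naive definition yields `nan`:\n\n ```python\n import tensorflow as tf\n\n @tf.custom_gradient\n def log1pexp(x):\n   e = tf.exp(x)\n   def grad(upstream):\n     return upstream * (1 - 1 / (1 + e))\n   return tf.math.log(1 + e), grad\n\n x = tf.constant(100.)\n with tf.GradientTape() as tape:\n   tape.watch(x)\n   y = log1pexp(x)\n print(tape.gradient(y, x).numpy())  # 1.0, not nan\n ```\n\n 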
The following example uses variables, which\n require special handling because they are effectively inputs of the forward\n function.\n\n >>> weights = tf.Variable(tf.ones([2])) # Trainable variable weights\n >>> @tf.custom_gradient\n ... def linear_poly(x):\n ... # Creating polynomial\n ... poly = weights[1] * x + weights[0]\n ...\n ... def grad_fn(dpoly, variables):\n ... # dy/dx = weights[1] and we need to left multiply dpoly\n ... grad_xs = dpoly * weights[1] # Scalar gradient\n ...\n ... grad_vars = [] # To store gradients of passed variables\n ... assert variables is not None\n ... assert len(variables) == 1\n ... assert variables[0] is weights\n ... # Manually computing dy/dweights\n ... dy_dw = dpoly * tf.stack([x ** 1, x ** 0])\n ... grad_vars.append(\n ... tf.reduce_sum(tf.reshape(dy_dw, [2, -1]), axis=1)\n ... )\n ... return grad_xs, grad_vars\n ... return poly, grad_fn\n >>> x = tf.constant([1., 2., 3.])\n >>> with tf.GradientTape(persistent=True) as tape:\n ... tape.watch(x)\n ... poly = linear_poly(x)\n >>> poly # poly = x + 1\n \n >>> tape.gradient(poly, x) # conventional scalar gradient dy/dx\n \n >>> tape.gradient(poly, weights)\n \n\n The above example illustrates the usage of the trainable variable `weights`.\n In the example, the inner `grad_fn` accepts an extra `variables` input\n parameter and also returns an extra `grad_vars` output. That extra argument\n is passed if the forward function reads any variables. You need to\n compute the gradient w.r.t. each of those `variables` and output it as a list\n of `grad_vars`. Note here that the default value of `variables` is set to `None`\n when no variables are used in the forward function.\n\n It should be noted that `tf.GradientTape` is still watching the forward pass of a\n `tf.custom_gradient`, and will use the ops it watches. As a consequence,\n calling `tf.function` while the tape is still watching leads\n to a gradient graph being built. If an op is used in `tf.function` without\n a registered gradient, a `LookupError` will be raised.\n\n Users can insert `tf.stop_gradient` to customize this behavior. This\n is demonstrated in the example below. `tf.random.shuffle` does not have a\n registered gradient. As a result, `tf.stop_gradient` is used to avoid the\n `LookupError`.\n\n ```python\n x = tf.constant([0.3, 0.5], dtype=tf.float32)\n\n @tf.custom_gradient\n def test_func_with_stop_grad(x):\n @tf.function\n def _inner_func():\n # Avoid exception during the forward pass\n return tf.stop_gradient(tf.random.shuffle(x))\n # return tf.random.shuffle(x) # This will raise\n\n res = _inner_func()\n def grad(upstream):\n return upstream # Arbitrarily defined custom gradient\n return res, grad\n\n with tf.GradientTape() as g:\n g.watch(x)\n res = test_func_with_stop_grad(x)\n\n g.gradient(res, x)\n ```\n\n See also `tf.RegisterGradient` which registers a gradient function for a\n primitive TensorFlow operation. 
`tf.custom_gradient` on the other hand allows\n for fine grained control over the gradient computation of a sequence of\n operations.\n\n Note that if the decorated function uses `Variable`s, the enclosing variable\n scope must be using `ResourceVariable`s.\n\n Args:\n f: function `f(*x)` that returns a tuple `(y, grad_fn)` where:\n - `x` is a sequence of (nested structures of) `Tensor` inputs to the\n function.\n - `y` is a (nested structure of) `Tensor` outputs of applying TensorFlow\n operations in `f` to `x`.\n - `grad_fn` is a function with the signature `g(*grad_ys)` which returns\n a list of `Tensor`s the same size as (flattened) `x` - the derivatives\n of `Tensor`s in `y` with respect to the `Tensor`s in `x`. `grad_ys` is\n a sequence of `Tensor`s the same size as (flattened) `y` holding the\n initial value gradients for each `Tensor` in `y`.\n\n In a pure mathematical sense, a vector-argument vector-valued function\n `f`'s derivatives should be its Jacobian matrix `J`. Here we are\n expressing the Jacobian `J` as a function `grad_fn` which defines how\n `J` will transform a vector `grad_ys` when left-multiplied with it\n (`grad_ys * J`, the vector-Jacobian product, or VJP). This functional\n representation of a matrix is convenient to use for chain-rule\n calculation (in e.g. the back-propagation algorithm).\n\n If `f` uses `Variable`s (that are not part of the\n inputs), i.e. through `get_variable`, then `grad_fn` should have\n signature `g(*grad_ys, variables=None)`, where `variables` is a list of\n the `Variable`s, and return a 2-tuple `(grad_xs, grad_vars)`, where\n `grad_xs` is the same as above, and `grad_vars` is a `list`\n with the derivatives of `Tensor`s in `y` with respect to the variables\n (that is, grad_vars has one Tensor per variable in variables).\n\n Returns:\n A function `h(x)` which returns the same value as `f(x)[0]` and whose\n gradient (as calculated by `tf.gradients`) is determined by `f(x)[1]`.\n "}, {"name": "device", "path": "./tf/device.md", "desc": "Specifies the device for ops created/executed in this context.", "type": "Functions", "docs": "Specifies the device for ops created/executed in this context.\n\n This function specifies the device to be used for ops created/executed in a\n particular context. Nested contexts will inherit and also create/execute\n their ops on the specified device. If a specific device is not required,\n consider not using this function so that a device can be automatically\n assigned. In general the use of this function is optional. `device_name` can\n be fully specified, as in \"/job:worker/task:1/device:cpu:0\", or partially\n specified, containing only a subset of the \"/\"-separated fields. 
Any fields\n which are specified will override device annotations from outer scopes.\n\n For example:\n\n ```python\n with tf.device('/job:foo'):\n # ops created here have devices with /job:foo\n with tf.device('/job:bar/task:0/device:gpu:2'):\n # ops created here have the fully specified device above\n with tf.device('/device:gpu:1'):\n # ops created here have the device '/job:foo/device:gpu:1'\n ```\n\n Args:\n device_name: The device name to use in the context.\n\n Returns:\n A context manager that specifies the default device to use for newly\n created ops.\n\n Raises:\n RuntimeError: If a function is passed in.\n "}, {"name": "divide", "path": "./tf/math/divide.md", "desc": "Computes Python style division of `x` by `y`.", "type": "Functions", "docs": "Computes Python style division of `x` by `y`.\n\n For example:\n\n >>> x = tf.constant([16, 12, 11])\n >>> y = tf.constant([4, 6, 2])\n >>> tf.divide(x,y)\n \n\n Args:\n x: A `Tensor`\n y: A `Tensor`\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with same shape as input\n "}, {"name": "dynamic_partition", "path": "./tf/dynamic_partition.md", "desc": "Partitions `data` into `num_partitions` tensors using indices from `partitions`.", "type": "Functions", "docs": "Partitions `data` into `num_partitions` tensors using indices from `partitions`.\n\n For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`\n becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i`\n are placed in `outputs[i]` in lexicographic order of `js`, and the first\n dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.\n In detail,\n\n ```python\n outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]\n\n outputs[i] = pack([data[js, ...] for js if partitions[js] == i])\n ```\n\n `data.shape` must start with `partitions.shape`.\n\n For example:\n\n ```python\n # Scalar partitions.\n partitions = 1\n num_partitions = 2\n data = [10, 20]\n outputs[0] = [] # Empty with shape [0, 2]\n outputs[1] = [[10, 20]]\n\n # Vector partitions.\n partitions = [0, 0, 1, 1, 0]\n num_partitions = 2\n data = [10, 20, 30, 40, 50]\n outputs[0] = [10, 20, 50]\n outputs[1] = [30, 40]\n ```\n\n See `dynamic_stitch` for an example on how to merge partitions back.\n\n
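A runnable version of the vector-partition case above (a minimal sketch, assuming TF 2.x eager execution):\n\n ```python\n import tensorflow as tf\n\n partitions = tf.constant([0, 0, 1, 1, 0])\n data = tf.constant([10, 20, 30, 40, 50])\n outputs = tf.dynamic_partition(data, partitions, num_partitions=2)\n print(outputs[0].numpy())  # [10 20 50]\n print(outputs[1].numpy())  # [30 40]\n ```\n\n 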
\n\n Args:\n data: A `Tensor`.\n partitions: A `Tensor` of type `int32`.\n Any shape. Indices in the range `[0, num_partitions)`.\n num_partitions: An `int` that is `>= 1`.\n The number of partitions to output.\n name: A name for the operation (optional).\n\n Returns:\n A list of `num_partitions` `Tensor` objects with the same type as `data`.\n "}, {"name": "dynamic_stitch", "path": "./tf/dynamic_stitch.md", "desc": "Interleave the values from the `data` tensors into a single tensor.", "type": "Functions", "docs": "Interleave the values from the `data` tensors into a single tensor.\n\n Builds a merged tensor such that\n\n ```python\n merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]\n ```\n\n For example, if each `indices[m]` is scalar or vector, we have\n\n ```python\n # Scalar indices:\n merged[indices[m], ...] = data[m][...]\n\n # Vector indices:\n merged[indices[m][i], ...] = data[m][i, ...]\n ```\n\n Each `data[i].shape` must start with the corresponding `indices[i].shape`,\n and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we\n must have `data[i].shape = indices[i].shape + constant`. In terms of this\n `constant`, the output shape is\n\n merged.shape = [max(indices) + 1] + constant\n\n Values are merged in order, so if an index appears in both `indices[m][i]` and\n `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the\n merged result. If you do not need this guarantee, ParallelDynamicStitch might\n perform better on some devices.\n\n For example:\n\n ```python\n indices[0] = 6\n indices[1] = [4, 1]\n indices[2] = [[5, 2], [0, 3]]\n data[0] = [61, 62]\n data[1] = [[41, 42], [11, 12]]\n data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]\n merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],\n [51, 52], [61, 62]]\n ```\n\n This method can be used to merge partitions created by `dynamic_partition`\n as illustrated in the following example:\n\n ```python\n # Apply function (increments x_i) on elements for which a certain condition\n # applies (x_i != -1 in this example).\n x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])\n condition_mask=tf.not_equal(x,tf.constant(-1.))\n partitioned_data = tf.dynamic_partition(\n x, tf.cast(condition_mask, tf.int32) , 2)\n partitioned_data[1] = partitioned_data[1] + 1.0\n condition_indices = tf.dynamic_partition(\n tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)\n x = tf.dynamic_stitch(condition_indices, partitioned_data)\n # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain\n # unchanged.\n ```\n\n 
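A runnable version of the scalar/vector/matrix `indices` example above (a minimal sketch, assuming TF 2.x eager execution):\n\n ```python\n import tensorflow as tf\n\n indices = [tf.constant(6),\n            tf.constant([4, 1]),\n            tf.constant([[5, 2], [0, 3]])]\n data = [tf.constant([61, 62]),\n         tf.constant([[41, 42], [11, 12]]),\n         tf.constant([[[51, 52], [21, 22]], [[1, 2], [31, 32]]])]\n merged = tf.dynamic_stitch(indices, data)\n print(merged.numpy())  # [[1 2] [11 12] [21 22] [31 32] [41 42] [51 52] [61 62]]\n ```\n\n 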
\n\n Args:\n indices: A list of at least 1 `Tensor` objects with type `int32`.\n data: A list with the same length as `indices` of `Tensor` objects with the same type.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `data`.\n "}, {"name": "edit_distance", "path": "./tf/edit_distance.md", "desc": "Computes the Levenshtein distance between sequences.", "type": "Functions", "docs": "Computes the Levenshtein distance between sequences.\n\n This operation takes variable-length sequences (`hypothesis` and `truth`),\n each provided as a `SparseTensor`, and computes the Levenshtein distance.\n You can normalize the edit distance by the length of `truth` by setting\n `normalize` to `True`.\n\n For example:\n\n Given the following input,\n * `hypothesis` is a `tf.SparseTensor` of shape `[2, 1, 1]`\n * `truth` is a `tf.SparseTensor` of shape `[2, 2, 2]`\n\n >>> hypothesis = tf.SparseTensor(\n ... [[0, 0, 0],\n ... [1, 0, 0]],\n ... [\"a\", \"b\"],\n ... (2, 1, 1))\n >>> truth = tf.SparseTensor(\n ... [[0, 1, 0],\n ... [1, 0, 0],\n ... [1, 0, 1],\n ... [1, 1, 0]],\n ... [\"a\", \"b\", \"c\", \"a\"],\n ... (2, 2, 2))\n >>> tf.edit_distance(hypothesis, truth, normalize=True)\n \n\n The operation returns a dense Tensor of shape `[2, 2]` with\n edit distances normalized by `truth` lengths.\n\n **Note**: It is possible to calculate edit distance between two\n sparse tensors with variable-length values. However, attempting to create\n them while eager execution is enabled will result in a `ValueError`.\n\n For the following inputs,\n\n ```python\n # 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:\n # (0,0) = [\"a\"]\n # (1,0) = [\"b\"]\n hypothesis = tf.sparse.SparseTensor(\n [[0, 0, 0],\n [1, 0, 0]],\n [\"a\", \"b\"],\n (2, 1, 1))\n\n # 'truth' is a tensor of shape `[2, 2]` with variable-length values:\n # (0,0) = []\n # (0,1) = [\"a\"]\n # (1,0) = [\"b\", \"c\"]\n # (1,1) = [\"a\"]\n truth = tf.sparse.SparseTensor(\n [[0, 1, 0],\n [1, 0, 0],\n [1, 0, 1],\n [1, 1, 0]],\n [\"a\", \"b\", \"c\", \"a\"],\n (2, 2, 2))\n\n normalize = True\n\n # The output would be a dense Tensor of shape `(2,)`, with edit distances\n # normalized by 'truth' lengths.\n # output => array([0., 0.5], dtype=float32)\n ```\n\n Args:\n hypothesis: A `SparseTensor` containing hypothesis sequences.\n truth: A `SparseTensor` containing truth sequences.\n normalize: A `bool`. If `True`, normalizes the Levenshtein distance by\n the length of `truth`.\n name: A name for the operation (optional).\n\n Returns:\n A dense `Tensor` with rank `R - 1`, where R is the rank of the\n `SparseTensor` inputs `hypothesis` and `truth`.\n\n Raises:\n TypeError: If either `hypothesis` or `truth` is not a `SparseTensor`.\n "}, {"name": "eig", "path": "./tf/linalg/eig.md", "desc": "Computes the eigen decomposition of a batch of matrices.", "type": "Functions", "docs": "Computes the eigen decomposition of a batch of matrices.\n\n The eigenvalues\n and eigenvectors for a non-Hermitian matrix in general are complex. The\n eigenvectors are not guaranteed to be linearly independent.\n\n Computes the eigenvalues and right eigenvectors of the innermost\n N-by-N matrices in `tensor` such that\n `tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i]`, for i=0...N-1.\n\n Args:\n tensor: `Tensor` of shape `[..., N, N]`. Only the lower triangular part of\n each innermost matrix is referenced.\n name: string, optional name of the operation.\n\n Returns:\n e: Eigenvalues. Shape is `[..., N]`. 
Sorted in non-decreasing order.\n v: Eigenvectors. Shape is `[..., N, N]`. The columns of the innermost\n matrices contain eigenvectors of the corresponding matrices in `tensor`.\n "}, {"name": "eigvals", "path": "./tf/linalg/eigvals.md", "desc": "Computes the eigenvalues of one or more matrices.", "type": "Functions", "docs": "Computes the eigenvalues of one or more matrices.\n\n Note: If your program backpropagates through this function, you should replace\n it with a call to `tf.linalg.eig` (possibly ignoring the second output) to\n avoid computing the eigen decomposition twice. This is because the\n eigenvectors are used to compute the gradient w.r.t. the eigenvalues. See\n `_SelfAdjointEigV2Grad` in `linalg_grad.py`.\n\n Args:\n tensor: `Tensor` of shape `[..., N, N]`.\n name: string, optional name of the operation.\n\n Returns:\n e: Eigenvalues. Shape is `[..., N]`. The vector `e[..., :]` contains the `N`\n eigenvalues of `tensor[..., :, :]`.\n "}, {"name": "einsum", "path": "./tf/einsum.md", "desc": "Tensor contraction over specified indices and outer product.", "type": "Functions", "docs": "Tensor contraction over specified indices and outer product.\n\n Einsum allows defining Tensors by defining their element-wise computation.\n This computation is defined by `equation`, a shorthand form based on Einstein\n summation. As an example, consider multiplying two matrices A and B to form a\n matrix C. The elements of C are given by:\n\n $$ C_{i,k} = \\sum_j A_{i,j} B_{j,k} $$\n\n or\n\n ```\n C[i,k] = sum_j A[i,j] * B[j,k]\n ```\n\n The corresponding einsum `equation` is:\n\n ```\n ij,jk->ik\n ```\n\n In general, to convert the element-wise equation into the `equation` string,\n use the following procedure (intermediate strings for matrix multiplication\n example provided in parentheses):\n\n 1. remove variable names, brackets, and commas, (`ik = sum_j ij * jk`)\n 2. replace \"*\" with \",\", (`ik = sum_j ij , jk`)\n 3. drop summation signs, and (`ik = ij, jk`)\n 4. move the output to the right, while replacing \"=\" with \"->\". (`ij,jk->ik`)\n\n Note: If the output indices are not specified, repeated indices are summed.\n So `ij,jk->ik` can be simplified to `ij,jk`.\n\n Many common operations can be expressed in this way. 
For example:\n\n **Matrix multiplication**\n\n >>> m0 = tf.random.normal(shape=[2, 3])\n >>> m1 = tf.random.normal(shape=[3, 5])\n >>> e = tf.einsum('ij,jk->ik', m0, m1)\n >>> # output[i,k] = sum_j m0[i,j] * m1[j, k]\n >>> print(e.shape)\n (2, 5)\n\n Repeated indices are summed if the output indices are not specified.\n\n >>> e = tf.einsum('ij,jk', m0, m1) # output[i,k] = sum_j m0[i,j] * m1[j, k]\n >>> print(e.shape)\n (2, 5)\n\n\n **Dot product**\n\n >>> u = tf.random.normal(shape=[5])\n >>> v = tf.random.normal(shape=[5])\n >>> e = tf.einsum('i,i->', u, v) # output = sum_i u[i]*v[i]\n >>> print(e.shape)\n ()\n\n **Outer product**\n\n >>> u = tf.random.normal(shape=[3])\n >>> v = tf.random.normal(shape=[5])\n >>> e = tf.einsum('i,j->ij', u, v) # output[i,j] = u[i]*v[j]\n >>> print(e.shape)\n (3, 5)\n\n **Transpose**\n\n >>> m = tf.ones([2,3])\n >>> e = tf.einsum('ij->ji', m) # output[j,i] = m[i,j]\n >>> print(e.shape)\n (3, 2)\n\n **Diag**\n\n >>> m = tf.reshape(tf.range(9), [3,3])\n >>> diag = tf.einsum('ii->i', m)\n >>> print(diag.shape)\n (3,)\n\n **Trace**\n\n >>> # Repeated indices are summed.\n >>> trace = tf.einsum('ii', m) # output = trace(m) = sum_i m[i, i]\n >>> assert trace == sum(diag)\n >>> print(trace.shape)\n ()\n\n **Batch matrix multiplication**\n\n >>> s = tf.random.normal(shape=[7,5,3])\n >>> t = tf.random.normal(shape=[7,3,2])\n >>> e = tf.einsum('bij,bjk->bik', s, t)\n >>> # output[a,i,k] = sum_j s[a,i,j] * t[a, j, k]\n >>> print(e.shape)\n (7, 5, 2)\n\n This method does not support broadcasting on named axes. All axes with\n matching labels should have the same length. If you have length-1 axes,\n use `tf.squeeze` or `tf.reshape` to eliminate them.\n\n To write code that is agnostic to the number of indices in the input\n use an ellipsis. The ellipsis is a placeholder for \"whatever other indices\n fit here\".\n\n For example, to perform a NumPy-style broadcasting-batch-matrix multiplication\n where the matrix multiply acts on the last two axes of the input, use:\n\n >>> s = tf.random.normal(shape=[11, 7, 5, 3])\n >>> t = tf.random.normal(shape=[11, 7, 3, 2])\n >>> e = tf.einsum('...ij,...jk->...ik', s, t)\n >>> print(e.shape)\n (11, 7, 5, 2)\n\n Einsum **will** broadcast over axes covered by the ellipsis.\n\n >>> s = tf.random.normal(shape=[11, 1, 5, 3])\n >>> t = tf.random.normal(shape=[1, 7, 3, 2])\n >>> e = tf.einsum('...ij,...jk->...ik', s, t)\n >>> print(e.shape)\n (11, 7, 5, 2)\n\n Args:\n equation: a `str` describing the contraction, in the same format as\n `numpy.einsum`.\n *inputs: the inputs to contract (each one a `Tensor`), whose shapes should\n be consistent with `equation`.\n **kwargs:\n - optimize: Optimization strategy to use to find contraction path using\n opt_einsum. Must be 'greedy', 'optimal', 'branch-2', 'branch-all' or\n 'auto'. 
(optional, default: 'greedy').\n - name: A name for the operation (optional).\n\n Returns:\n The contracted `Tensor`, with shape determined by `equation`.\n\n Raises:\n ValueError: If\n - the format of `equation` is incorrect,\n - the number of inputs or their shapes are inconsistent with `equation`.\n "}, {"name": "ensure_shape", "path": "./tf/ensure_shape.md", "desc": "Updates the shape of a tensor and checks at runtime that the shape holds.", "type": "Functions", "docs": "Updates the shape of a tensor and checks at runtime that the shape holds.\n\n When executed, this operation asserts that the input tensor `x`'s shape\n is compatible with the `shape` argument.\n See `tf.TensorShape.is_compatible_with` for details.\n\n >>> x = tf.constant([[1, 2, 3],\n ... [4, 5, 6]])\n >>> x = tf.ensure_shape(x, [2, 3])\n\n Use `None` for unknown dimensions:\n\n >>> x = tf.ensure_shape(x, [None, 3])\n >>> x = tf.ensure_shape(x, [2, None])\n\n If the tensor's shape is not compatible with the `shape` argument, an error\n is raised:\n\n >>> x = tf.ensure_shape(x, [5])\n Traceback (most recent call last):\n ...\n tf.errors.InvalidArgumentError: Shape of tensor dummy_input [3] is not\n compatible with expected shape [5]. [Op:EnsureShape]\n\n During graph construction (typically tracing a `tf.function`),\n `tf.ensure_shape` updates the static-shape of the **result** tensor by\n merging the two shapes. See `tf.TensorShape.merge_with` for details.\n\n This is most useful when **you** know a shape that can't be determined\n statically by TensorFlow.\n\n The following trivial `tf.function` prints the input tensor's\n static-shape before and after `ensure_shape` is applied.\n\n >>> @tf.function\n ... def f(tensor):\n ... print(\"Static-shape before:\", tensor.shape)\n ... tensor = tf.ensure_shape(tensor, [None, 3])\n ... print(\"Static-shape after:\", tensor.shape)\n ... return tensor\n\n This lets you see the effect of `tf.ensure_shape` when the function is traced:\n >>> cf = f.get_concrete_function(tf.TensorSpec([None, None]))\n Static-shape before: (None, None)\n Static-shape after: (None, 3)\n\n >>> cf(tf.zeros([3, 3])) # Passes\n >>> cf(tf.constant([1, 2, 3])) # Fails\n Traceback (most recent call last):\n ...\n InvalidArgumentError: Shape of tensor x [3] is not compatible with expected shape [3,3].\n\n The above example raises `tf.errors.InvalidArgumentError`, because `x`'s\n shape, `(3,)`, is not compatible with the `shape` argument, `(None, 3)`.\n\n Inside a `tf.function` or `v1.Graph` context it checks both the buildtime and\n runtime shapes. This is stricter than `tf.Tensor.set_shape` which only\n checks the buildtime shape.\n\n Note: This differs from `tf.Tensor.set_shape` in that it sets the static shape\n of the resulting tensor and enforces it at runtime, raising an error if the\n tensor's runtime shape is incompatible with the specified shape.\n `tf.Tensor.set_shape` sets the static shape of the tensor without enforcing it\n at runtime, which may result in inconsistencies between the statically-known\n shape of tensors and the runtime value of tensors.\n\n For example, when loading images of a known size:\n\n >>> @tf.function\n ... def decode_image(png):\n ... image = tf.image.decode_png(png, channels=3)\n ... # the `print` executes during tracing.\n ... print(\"Initial shape: \", image.shape)\n ... image = tf.ensure_shape(image,[28, 28, 3])\n ... print(\"Final shape: \", image.shape)\n ... 
return image\n\n When tracing a function, no ops are executed, so shapes may be unknown.\n See the [Concrete Functions Guide](https://www.tensorflow.org/guide/concrete_function)\n for details.\n\n >>> concrete_decode = decode_image.get_concrete_function(\n ... tf.TensorSpec([], dtype=tf.string))\n Initial shape: (None, None, 3)\n Final shape: (28, 28, 3)\n\n >>> image = tf.random.uniform(maxval=255, shape=[28, 28, 3], dtype=tf.int32)\n >>> image = tf.cast(image,tf.uint8)\n >>> png = tf.image.encode_png(image)\n >>> image2 = concrete_decode(png)\n >>> print(image2.shape)\n (28, 28, 3)\n\n >>> image = tf.concat([image,image], axis=0)\n >>> print(image.shape)\n (56, 28, 3)\n >>> png = tf.image.encode_png(image)\n >>> image2 = concrete_decode(png)\n Traceback (most recent call last):\n ...\n tf.errors.InvalidArgumentError: Shape of tensor DecodePng [56,28,3] is not\n compatible with expected shape [28,28,3].\n\n Caution: if you don't use the result of `tf.ensure_shape` the check may not\n run.\n\n >>> @tf.function\n ... def bad_decode_image(png):\n ... image = tf.image.decode_png(png, channels=3)\n ... # the `print` executes during tracing.\n ... print(\"Initial shape: \", image.shape)\n ... # BAD: forgot to use the returned tensor.\n ... tf.ensure_shape(image,[28, 28, 3])\n ... print(\"Final shape: \", image.shape)\n ... return image\n\n >>> image = bad_decode_image(png)\n Initial shape: (None, None, 3)\n Final shape: (None, None, 3)\n >>> print(image.shape)\n (56, 28, 3)\n\n Args:\n x: A `Tensor`.\n shape: A `TensorShape` representing the shape of this tensor, a\n `TensorShapeProto`, a list, a tuple, or None.\n name: A name for this operation (optional). Defaults to \"EnsureShape\".\n\n Returns:\n A `Tensor`. Has the same type and contents as `x`.\n\n Raises:\n tf.errors.InvalidArgumentError: If `shape` is incompatible with the shape\n of `x`.\n "}, {"name": "equal", "path": "./tf/math/equal.md", "desc": "Returns the truth value of (x == y", "type": "Functions", "docs": "Returns the truth value of (x == y) element-wise.\n\n Performs a [broadcast](\n https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the\n arguments and then an element-wise equality comparison, returning a Tensor of\n boolean values.\n\n For example:\n\n >>> x = tf.constant([2, 4])\n >>> y = tf.constant(2)\n >>> tf.math.equal(x, y)\n \n\n >>> x = tf.constant([2, 4])\n >>> y = tf.constant([2, 4])\n >>> tf.math.equal(x, y)\n \n\n Args:\n x: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.\n y: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.\n name: A name for the operation (optional).\n\n Returns:\n A `tf.Tensor` of type `bool` with the same size as that of `x` or `y`.\n\n Raises:\n `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible.\n "}, {"name": "executing_eagerly", "path": "./tf/executing_eagerly.md", "desc": "Checks whether the current thread has eager execution enabled.", "type": "Functions", "docs": "Checks whether the current thread has eager execution enabled.\n\n Eager execution is enabled by default and this API returns `True`\n in most cases. 
However, this API might return `False` in the following use\n cases.\n\n * Executing inside `tf.function`, unless under `tf.init_scope` or unless\n `tf.config.run_functions_eagerly(True)` was previously called.\n * Executing inside a transformation function for `tf.data`.\n * `tf.compat.v1.disable_eager_execution()` is called.\n\n General case:\n\n >>> print(tf.executing_eagerly())\n True\n\n Inside `tf.function`:\n\n >>> @tf.function\n ... def fn():\n ... with tf.init_scope():\n ... print(tf.executing_eagerly())\n ... print(tf.executing_eagerly())\n >>> fn()\n True\n False\n\n Inside `tf.function` after `tf.config.run_functions_eagerly(True)` is called:\n\n >>> tf.config.run_functions_eagerly(True)\n >>> @tf.function\n ... def fn():\n ... with tf.init_scope():\n ... print(tf.executing_eagerly())\n ... print(tf.executing_eagerly())\n >>> fn()\n True\n True\n >>> tf.config.run_functions_eagerly(False)\n\n Inside a transformation function for `tf.data`:\n\n >>> def data_fn(x):\n ... print(tf.executing_eagerly())\n ... return x\n >>> dataset = tf.data.Dataset.range(100)\n >>> dataset = dataset.map(data_fn)\n False\n\n Returns:\n `True` if the current thread has eager execution enabled.\n "}, {"name": "exp", "path": "./tf/math/exp.md", "desc": "Computes exponential of x element-wise. \\\\(y = e^x\\\\", "type": "Functions", "docs": "Computes exponential of x element-wise. \\\\(y = e^x\\\\).\n\n This function computes the exponential of the input tensor element-wise,\n i.e. `math.exp(x)` or \\\\(e^x\\\\), where `x` is the input tensor.\n \\\\(e\\\\) denotes Euler's number and is approximately equal to 2.718281.\n Output is positive for any real input.\n\n >>> x = tf.constant(2.0)\n >>> tf.math.exp(x)\n \n\n >>> x = tf.constant([2.0, 8.0])\n >>> tf.math.exp(x)\n \n\n For complex numbers, the exponential value is calculated as\n $$\n e^{x+iy} = {e^x} {e^{iy}} = {e^x} ({\\cos (y) + i \\sin (y)})\n $$\n\n For `1+1j` the value would be computed as:\n $$\n e^1 (\\cos (1) + i \\sin (1)) = 2.7182817 \\times (0.5403023+0.84147096j)\n $$\n\n >>> x = tf.constant(1 + 1j)\n >>> tf.math.exp(x)\n \n\n Args:\n x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,\n `float32`, `float64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `tf.Tensor`. Has the same type as `x`.\n\n @compatibility(numpy)\n Equivalent to np.exp\n @end_compatibility\n "}, {"name": "expand_dims", "path": "./tf/expand_dims.md", "desc": "Returns a tensor with a length 1 axis inserted at index `axis`.", "type": "Functions", "docs": "Returns a tensor with a length 1 axis inserted at index `axis`.\n\n Given a tensor `input`, this operation inserts a dimension of length 1 at the\n dimension index `axis` of `input`'s shape. 
The dimension index follows Python\n indexing rules: it's zero-based, and a negative index is counted backward\n from the end.\n\n This operation is useful to:\n\n * Add an outer \"batch\" dimension to a single element.\n * Align axes for broadcasting.\n * Add an inner vector-length axis to a tensor of scalars.\n\n For example:\n\n If you have a single image of shape `[height, width, channels]`:\n\n >>> image = tf.zeros([10,10,3])\n\n You can add an outer `batch` axis by passing `axis=0`:\n\n >>> tf.expand_dims(image, axis=0).shape.as_list()\n [1, 10, 10, 3]\n\n The new axis location matches Python `list.insert(axis, 1)`:\n\n >>> tf.expand_dims(image, axis=1).shape.as_list()\n [10, 1, 10, 3]\n\n Following standard Python indexing rules, a negative `axis` counts from the\n end so `axis=-1` adds an innermost dimension:\n\n >>> tf.expand_dims(image, -1).shape.as_list()\n [10, 10, 3, 1]\n\n This operation requires that `axis` is a valid index for `input.shape`,\n following Python indexing rules:\n\n ```\n -1-tf.rank(input) <= axis <= tf.rank(input)\n ```\n\n This operation is related to:\n\n * `tf.squeeze`, which removes dimensions of size 1.\n * `tf.reshape`, which provides more flexible reshaping capability.\n * `tf.sparse.expand_dims`, which provides this functionality for\n `tf.SparseTensor`.\n\n Args:\n input: A `Tensor`.\n axis: Integer specifying the dimension index at which to expand the\n shape of `input`. Given an input of D dimensions, `axis` must be in range\n `[-(D+1), D]` (inclusive).\n name: Optional string. The name of the output `Tensor`.\n\n Returns:\n A tensor with the same data as `input`, with an additional dimension\n inserted at the index specified by `axis`.\n\n Raises:\n TypeError: If `axis` is not specified.\n InvalidArgumentError: If `axis` is out of range `[-(D+1), D]`.\n "}, {"name": "extract_volume_patches", "path": "./tf/extract_volume_patches.md", "desc": "Extract `patches` from `input` and put them in the `\"depth\"` output dimension. 3D extension of `extract_image_patches`.", "type": "Functions", "docs": "Extract `patches` from `input` and put them in the `\"depth\"` output dimension. 3D extension of `extract_image_patches`.\n\n Args:\n input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.\n 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`.\n ksizes: A list of `ints` that has length `>= 5`.\n The size of the sliding window for each dimension of `input`.\n strides: A list of `ints` that has length `>= 5`.\n 1-D of length 5. How far the centers of two consecutive patches are in\n `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`.\n padding: A `string` from: `\"SAME\", \"VALID\"`.\n The type of padding algorithm to use.\n\n The size-related attributes are specified as follows:\n\n ```python\n ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]\n strides = [1, stride_planes, stride_rows, stride_cols, 1]\n ```\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `input`.\n "}, {"name": "eye", "path": "./tf/eye.md", "desc": "Construct an identity matrix, or a batch of matrices.", "type": "Functions", "docs": "Construct an identity matrix, or a batch of matrices.\n\n See also `tf.ones`, `tf.zeros`, `tf.fill`, `tf.one_hot`.\n\n ```python\n # Construct one identity matrix.\n tf.eye(2)\n ==> [[1., 0.],\n [0., 1.]]\n\n # Construct a batch of 3 identity matrices, each 2 x 2.\n # batch_identity[i, :, :] is a 2 x 2 identity matrix, i = 0, 1, 2.\n batch_identity = tf.eye(2, batch_shape=[3])\n\n # Construct one 2 x 3 \"identity\" matrix\n tf.eye(2, num_columns=3)\n ==> [[ 1., 0., 0.],\n [ 0., 1., 0.]]\n ```\n\n Args:\n num_rows: Non-negative `int32` scalar `Tensor` giving the number of rows\n in each batch matrix.\n num_columns: Optional non-negative `int32` scalar `Tensor` giving the number\n of columns in each batch matrix. Defaults to `num_rows`.\n batch_shape: A list or tuple of Python integers or a 1-D `int32` `Tensor`.\n If provided, the returned `Tensor` will have leading batch dimensions of\n this shape.\n dtype: The type of an element in the resulting `Tensor`.\n name: A name for this `Op`. Defaults to \"eye\".\n\n Returns:\n A `Tensor` of shape `batch_shape + [num_rows, num_columns]`.\n "}, {"name": "fill", "path": "./tf/fill.md", "desc": "Creates a tensor filled with a scalar value.", "type": "Functions", "docs": "Creates a tensor filled with a scalar value.\n\n See also `tf.ones`, `tf.zeros`, `tf.one_hot`, `tf.eye`.\n\n This operation creates a tensor of shape `dims` and fills it with `value`.\n\n For example:\n\n >>> tf.fill([2, 3], 9)\n \n\n `tf.fill` evaluates at graph runtime and supports dynamic shapes based on\n other runtime `tf.Tensors`, unlike `tf.constant(value, shape=dims)`, which\n embeds the value as a `Const` node.\n\n Args:\n dims: A 1-D sequence of non-negative numbers. Represents the shape of the\n output `tf.Tensor`. Entries should be of type: `int32`, `int64`.\n value: A value to fill the returned `tf.Tensor`.\n name: Optional string. The name of the output `tf.Tensor`.\n\n Returns:\n A `tf.Tensor` with shape `dims` and the same dtype as `value`.\n\n Raises:\n InvalidArgumentError: `dims` contains negative entries.\n NotFoundError: `dims` contains non-integer entries.\n\n @compatibility(numpy)\n Similar to `np.full`. In `numpy`, more parameters are supported. Passing a\n number argument as the shape (`np.full(5, value)`) is valid in `numpy` for\n specifying a 1-D shaped result, while TensorFlow does not support this syntax.\n @end_compatibility\n "}, {"name": "fingerprint", "path": "./tf/fingerprint.md", "desc": "Generates fingerprint values.", "type": "Functions", "docs": "Generates fingerprint values.\n\n Generates fingerprint values of `data`.\n\n Fingerprint op considers the first dimension of `data` as the batch dimension,\n and `output[i]` contains the fingerprint value generated from contents in\n `data[i, ...]` for all `i`.\n\n Fingerprint op writes fingerprint values as byte arrays. For example, the\n default method `farmhash64` generates a 64-bit fingerprint value at a time.\n This 8-byte value is written out as a `tf.uint8` array of size 8, in\n little-endian order.\n\n For example, suppose that `data` has data type `tf.int32` and shape (2, 3, 4),\n and that the fingerprint method is `farmhash64`. In this case, the output\n shape is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the\n size of each fingerprint value in bytes. 
`output[0, :]` is generated from\n 12 integers in `data[0, :, :]` and similarly `output[1, :]` is generated from\n the other 12 integers in `data[1, :, :]`.\n\n Note that this op fingerprints the raw underlying buffer, and it does not\n fingerprint Tensor's metadata such as data type and/or shape. For example, the\n fingerprint values are invariant under reshapes and bitcasts as long as the\n batch dimension remains the same:\n\n ```python\n tf.fingerprint(data) == tf.fingerprint(tf.reshape(data, ...))\n tf.fingerprint(data) == tf.fingerprint(tf.bitcast(data, ...))\n ```\n\n For string data, one should expect `tf.fingerprint(data) !=\n tf.fingerprint(tf.string.reduce_join(data))` in general.\n\n Args:\n data: A `Tensor`. Must have rank 1 or higher.\n method: A `Tensor` of type `tf.string`. Fingerprint method used by this op.\n The currently available method is `farmhash64`.\n name: A name for the operation (optional).\n\n Returns:\n A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals\n `data`'s first dimension, and the second dimension size depends on the\n fingerprint algorithm.\n "}, {"name": "floor", "path": "./tf/math/floor.md", "desc": "Returns element-wise largest integer not greater than x.", "type": "Functions", "docs": "Returns element-wise largest integer not greater than x.\n\n The input range is `(-inf, inf)` and the\n output range consists of all integer values.\n\n For example:\n\n >>> x = tf.constant([1.3324, -1.5, 5.555, -2.532, 0.99, float(\"inf\")])\n >>> tf.floor(x).numpy()\n array([ 1., -2., 5., -3., 0., inf], dtype=float32)\n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`,\n `float32`, `float64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n "}, {"name": "foldl", "path": "./tf/foldl.md", "desc": "foldl on the list of tensors unpacked from `elems` on dimension 0. (deprecated argument values", "type": "Functions", "docs": "foldl on the list of tensors unpacked from `elems` on dimension 0. (deprecated argument values)\n\nDeprecated: SOME ARGUMENT VALUES ARE DEPRECATED: `(back_prop=False)`. They will be removed in a future version.\nInstructions for updating:\nback_prop=False is deprecated. Consider using tf.stop_gradient instead.\nInstead of:\nresults = tf.foldl(fn, elems, back_prop=False)\nUse:\nresults = tf.nest.map_structure(tf.stop_gradient, tf.foldl(fn, elems))\n\nThis foldl operator repeatedly applies the callable `fn` to a sequence\nof elements from first to last. The elements are made of the tensors\nunpacked from `elems` on dimension 0. The callable `fn` takes two tensors as\narguments. The first argument is the accumulated value computed from the\npreceding invocation of `fn`, and the second is the value at the current\nposition of `elems`. If `initializer` is None, `elems` must contain at least\none element, and its first element is used as the initializer.\n\nSuppose that `elems` is unpacked into `values`, a list of tensors. The shape\nof the result tensor is `fn(initializer, values[0]).shape`.\n\nThis method also allows multi-arity `elems` and output of `fn`. If `elems`\nis a (possibly nested) list or tuple of tensors, then each of these tensors\nmust have a matching first (unpack) dimension. The signature of `fn` may\nmatch the structure of `elems`. 
That is, if `elems` is\n`(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:\n`fn = lambda (t1, [t2, t3, [t4, t5]]):`.\n\nArgs:\n fn: The callable to be performed.\n elems: A tensor or (possibly nested) sequence of tensors, each of which will\n be unpacked along their first dimension. The nested sequence of the\n resulting slices will be the first argument to `fn`.\n initializer: (optional) A tensor or (possibly nested) sequence of tensors,\n as the initial value for the accumulator.\n parallel_iterations: (optional) The number of iterations allowed to run in\n parallel.\n back_prop: (optional) Deprecated. False disables support for back\n propagation. Prefer using `tf.stop_gradient` instead.\n swap_memory: (optional) True enables GPU-CPU memory swapping.\n name: (optional) Name prefix for the returned tensors.\n\nReturns:\n A tensor or (possibly nested) sequence of tensors, resulting from applying\n `fn` consecutively to the list of tensors unpacked from `elems`, from first\n to last.\n\nRaises:\n TypeError: if `fn` is not callable.\n\nExample:\n ```python\n elems = tf.constant([1, 2, 3, 4, 5, 6])\n sum = foldl(lambda a, x: a + x, elems)\n # sum == 21\n ```"}, {"name": "foldr", "path": "./tf/foldr.md", "desc": "foldr on the list of tensors unpacked from `elems` on dimension 0. (deprecated argument values", "type": "Functions", "docs": "foldr on the list of tensors unpacked from `elems` on dimension 0. (deprecated argument values)\n\nDeprecated: SOME ARGUMENT VALUES ARE DEPRECATED: `(back_prop=False)`. They will be removed in a future version.\nInstructions for updating:\nback_prop=False is deprecated. Consider using tf.stop_gradient instead.\nInstead of:\nresults = tf.foldr(fn, elems, back_prop=False)\nUse:\nresults = tf.nest.map_structure(tf.stop_gradient, tf.foldr(fn, elems))\n\nThis foldr operator repeatedly applies the callable `fn` to a sequence\nof elements from last to first. The elements are made of the tensors\nunpacked from `elems`. The callable fn takes two tensors as arguments.\nThe first argument is the accumulated value computed from the preceding\ninvocation of fn, and the second is the value at the current position of\n`elems`. If `initializer` is None, `elems` must contain at least one element,\nand its first element is used as the initializer.\n\nSuppose that `elems` is unpacked into `values`, a list of tensors. The shape\nof the result tensor is `fn(initializer, values[0]).shape`.\n\nThis method also allows multi-arity `elems` and output of `fn`. If `elems`\nis a (possibly nested) list or tuple of tensors, then each of these tensors\nmust have a matching first (unpack) dimension. The signature of `fn` may\nmatch the structure of `elems`. That is, if `elems` is\n`(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:\n`fn = lambda (t1, [t2, t3, [t4, t5]]):`.\n\nArgs:\n fn: The callable to be performed.\n elems: A tensor or (possibly nested) sequence of tensors, each of which will\n be unpacked along their first dimension. The nested sequence of the\n resulting slices will be the first argument to `fn`.\n initializer: (optional) A tensor or (possibly nested) sequence of tensors,\n as the initial value for the accumulator.\n parallel_iterations: (optional) The number of iterations allowed to run in\n parallel.\n back_prop: (optional) Deprecated. False disables support for back\n propagation. 
Prefer using `tf.stop_gradient` instead.\n swap_memory: (optional) True enables GPU-CPU memory swapping.\n name: (optional) Name prefix for the returned tensors.\n\nReturns:\n A tensor or (possibly nested) sequence of tensors, resulting from applying\n `fn` consecutively to the list of tensors unpacked from `elems`, from last\n to first.\n\nRaises:\n TypeError: if `fn` is not callable.\n\nExample:\n ```python\n elems = [1, 2, 3, 4, 5, 6]\n sum = foldr(lambda a, x: a + x, elems)\n # sum == 21\n ```"}, {"name": "function", "path": "./tf/function.md", "desc": "Compiles a function into a callable TensorFlow graph. (deprecated arguments", "type": "Functions", "docs": "Compiles a function into a callable TensorFlow graph. (deprecated arguments) (deprecated arguments)\n\nDeprecated: SOME ARGUMENTS ARE DEPRECATED: `(experimental_compile)`. They will be removed in a future version.\nInstructions for updating:\nexperimental_compile is deprecated, use jit_compile instead\n\nDeprecated: SOME ARGUMENTS ARE DEPRECATED: `(experimental_relax_shapes)`. They will be removed in a future version.\nInstructions for updating:\nexperimental_relax_shapes is deprecated, use reduce_retracing instead\n\n`tf.function` constructs a `tf.types.experimental.GenericFunction` that\nexecutes a TensorFlow graph (`tf.Graph`) created by trace-compiling the\nTensorFlow operations in `func`. More information on the topic can be found\nin [Introduction to Graphs and tf.function]\n(https://www.tensorflow.org/guide/intro_to_graphs).\n\nSee [Better Performance with tf.function]\n(https://www.tensorflow.org/guide/function) for tips on performance and\nknown limitations.\n\nExample usage:\n\n>>> @tf.function\n... def f(x, y):\n... return x ** 2 + y\n>>> x = tf.constant([2, 3])\n>>> y = tf.constant([3, -2])\n>>> f(x, y)\n\n\nThe trace-compilation allows non-TensorFlow operations to execute, but under\nspecial conditions. In general, only TensorFlow operations are guaranteed to\nrun and create fresh results whenever the `GenericFunction` is called.\n\n## Features\n\n`func` may use data-dependent Python control flow statements, including `if`,\n`for`, `while`, `break`, `continue` and `return`:\n\n>>> @tf.function\n... def f(x):\n... if tf.reduce_sum(x) > 0:\n... return x * x\n... else:\n... return -x // 2\n>>> f(tf.constant(-2))\n\n\n`func`'s closure may include `tf.Tensor` and `tf.Variable` objects:\n\n>>> @tf.function\n... def f():\n... return x ** 2 + y\n>>> x = tf.constant([-2, -3])\n>>> y = tf.Variable([3, -2])\n>>> f()\n\n\n`func` may also use ops with side effects, such as `tf.print`, `tf.Variable`\nand others:\n\n>>> v = tf.Variable(1)\n>>> @tf.function\n... def f(x):\n... for i in tf.range(x):\n... v.assign_add(i)\n>>> f(3)\n>>> v\n\n\nImportant: Any Python side-effects (appending to a list, printing with\n`print`, etc) will only happen once, when `func` is traced. To have\nside-effects executed each time your `tf.function` runs, they need to be written\nas TF ops:\n\n>>> l = []\n>>> @tf.function\n... def f(x):\n... for i in x:\n... l.append(i + 1) # Caution! Will only happen once when tracing\n>>> f(tf.constant([1, 2, 3]))\n>>> l\n[]\n\nInstead, use TensorFlow collections like `tf.TensorArray`:\n\n>>> @tf.function\n... def f(x):\n... ta = tf.TensorArray(dtype=tf.int32, size=0, dynamic_size=True)\n... for i in range(len(x)):\n... ta = ta.write(i, x[i] + 1)\n... 
return ta.stack()\n>>> f(tf.constant([1, 2, 3]))\n\n\n## `tf.function` creates polymorphic callables\n\nInternally, `tf.types.experimental.GenericFunction` may contain multiple\n`tf.types.experimental.ConcreteFunction`s, each specialized to arguments with\ndifferent data types or shapes, since TensorFlow can perform more\noptimizations on graphs of specific shapes, dtypes and values of constant\narguments. `tf.function` treats any pure Python values as opaque objects (best\nthought of as compile-time constants), and builds a separate `tf.Graph` for\neach set of Python arguments that it encounters.\nFor more information, see the\n[tf.function guide](https://www.tensorflow.org/guide/function#rules_of_tracing)\n\nExecuting a `GenericFunction` will select and execute the appropriate\n`ConcreteFunction` based on the argument types and values.\n\nTo obtain an individual `ConcreteFunction`, use the\n`GenericFunction.get_concrete_function` method. It can be called with the\nsame arguments as `func` and returns a\n`tf.types.experimental.ConcreteFunction`. `ConcreteFunction`s are backed by a\nsingle `tf.Graph`:\n\n>>> @tf.function\n... def f(x):\n... return x + 1\n>>> isinstance(f.get_concrete_function(1).graph, tf.Graph)\nTrue\n\n`ConcreteFunction`s can be executed just like `GenericFunction`s, but their\ninput is restricted to the types to which they're specialized.\n\n## Retracing\n\n`ConcreteFunction`s are built (traced) on the fly, as the `GenericFunction` is\ncalled with new TensorFlow types or shapes, or with new Python values as\narguments. When `GenericFunction` builds a new trace, it is said that `func`\nis retraced. Retracing is a frequent performance concern for `tf.function` as\nit can be considerably slower than executing a graph that's already been\ntraced. It is ideal to minimize the amount of retracing in your code.\n\nCaution: Passing python scalars or lists as arguments to `tf.function` will\nusually retrace. To avoid this, pass numeric arguments as Tensors whenever\npossible:\n\n>>> @tf.function\n... def f(x):\n... return tf.abs(x)\n>>> f1 = f.get_concrete_function(1)\n>>> f2 = f.get_concrete_function(2) # Slow - compiles new graph\n>>> f1 is f2\nFalse\n>>> f1 = f.get_concrete_function(tf.constant(1))\n>>> f2 = f.get_concrete_function(tf.constant(2)) # Fast - reuses f1\n>>> f1 is f2\nTrue\n\nPython numerical arguments should only be used when they take few distinct\nvalues, such as hyperparameters like the number of layers in a neural network.\n\n## Input signatures\n\nFor Tensor arguments, `GenericFunction` creates a new `ConcreteFunction` for\nevery unique set of input shapes and datatypes. The example below creates two\nseparate `ConcreteFunction`s, each specialized to a different shape:\n\n>>> @tf.function\n... def f(x):\n... return x + 1\n>>> vector = tf.constant([1.0, 1.0])\n>>> matrix = tf.constant([[3.0]])\n>>> f.get_concrete_function(vector) is f.get_concrete_function(matrix)\nFalse\n\nAn \"input signature\" can be optionally provided to `tf.function` to control\nthis process. The input signature specifies the shape and type of each\nTensor argument to the function using a `tf.TensorSpec` object. More general\nshapes can be used. This ensures only one `ConcreteFunction` is created, and\nrestricts the `GenericFunction` to the specified shapes and types. It is\nan effective way to limit retracing when Tensors have dynamic shapes.\n\n>>> @tf.function(\n... input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])\n... def f(x):\n... 
return x + 1\n>>> vector = tf.constant([1.0, 1.0])\n>>> matrix = tf.constant([[3.0]])\n>>> f.get_concrete_function(vector) is f.get_concrete_function(matrix)\nTrue\n\n## Variables may only be created once\n\n`tf.function` only allows creating new `tf.Variable` objects when it is called\nfor the first time:\n\n>>> class MyModule(tf.Module):\n... def __init__(self):\n... self.v = None\n...\n... @tf.function\n... def __call__(self, x):\n... if self.v is None:\n... self.v = tf.Variable(tf.ones_like(x))\n... return self.v * x\n\nIn general, it is recommended to create `tf.Variable`s outside of\n`tf.function`.\nIn simple cases, persisting state across `tf.function` boundaries may be\nimplemented using a pure functional style in which state is represented by\n`tf.Tensor`s passed as arguments and returned as return values.\n\nContrast the two styles below:\n\n>>> state = tf.Variable(1)\n>>> @tf.function\n... def f(x):\n... state.assign_add(x)\n>>> f(tf.constant(2)) # Non-pure functional style\n>>> state\n\n\n>>> state = tf.constant(1)\n>>> @tf.function\n... def f(state, x):\n... state += x\n... return state\n>>> state = f(state, tf.constant(2)) # Pure functional style\n>>> state\n\n\n## Python operations execute only once per trace\n\n`func` may contain TensorFlow operations mixed with pure Python operations.\nHowever, when the function is executed, only the TensorFlow operations will\nrun. The Python operations run only once, at trace time. If TensorFlow\noperations depend on results from Python operations, those results will be\nfrozen into the graph.\n\n>>> @tf.function\n... def f(a, b):\n... print('this runs at trace time; a is', a, 'and b is', b)\n... return b\n>>> f(1, tf.constant(1))\nthis runs at trace time; a is 1 and b is Tensor(\"...\", shape=(), dtype=int32)\n\n\n>>> f(1, tf.constant(2))\n\n\n>>> f(2, tf.constant(1))\nthis runs at trace time; a is 2 and b is Tensor(\"...\", shape=(), dtype=int32)\n\n\n>>> f(2, tf.constant(2))\n\n\n## Using type annotations to improve performance\n\n`experimental_follow_type_hints` can be used along with type annotations to\nreduce retracing by automatically casting any Python values to `tf.Tensor`\n(something that is not done by default, unless you use input signatures).\n\n>>> @tf.function(experimental_follow_type_hints=True)\n... def f_with_hints(x: tf.Tensor):\n... print('Tracing')\n... return x\n>>> @tf.function(experimental_follow_type_hints=False)\n... def f_no_hints(x: tf.Tensor):\n... print('Tracing')\n... return x\n>>> f_no_hints(1)\nTracing\n\n>>> f_no_hints(2)\nTracing\n\n>>> f_with_hints(1)\nTracing\n\n>>> f_with_hints(2)\n\n\nArgs:\n func: the function to be compiled. If `func` is None, `tf.function` returns\n a decorator that can be invoked with a single argument - `func`. In other\n words, `tf.function(input_signature=...)(func)` is equivalent to\n `tf.function(func, input_signature=...)`. The former can be used as a\n decorator.\n input_signature: A possibly nested sequence of `tf.TensorSpec` objects\n specifying the shapes and dtypes of the Tensors that will be supplied to\n this function. If `None`, a separate function is instantiated for each\n inferred input signature. If input_signature is specified, every input to\n `func` must be a `Tensor`, and `func` cannot accept `**kwargs`.\n autograph: Whether autograph should be applied on `func` before tracing a\n graph. Data-dependent Python control flow statements require\n `autograph=True`. 
For more information, see the\n [tf.function and AutoGraph guide](\n https://www.tensorflow.org/guide/function#autograph_transformations).\n jit_compile: If `True`, compiles the function using\n [XLA](https://tensorflow.org/xla). XLA performs compiler optimizations,\n such as fusion, and attempts to emit more efficient code. This may\n drastically improve performance. If set to `True`,\n the whole function needs to be compilable by XLA, or an\n `errors.InvalidArgumentError` is thrown.\n If `None` (default), compiles the function with XLA when running on TPU\n and goes through the regular function execution path when running on\n other devices.\n If `False`, executes the function without XLA compilation. Set this value\n to `False` when directly running a multi-device function on TPUs (e.g. two\n TPU cores, one TPU core and its host CPU).\n Not all functions are compilable, see a list of\n [sharp corners](https://tensorflow.org/xla/known_issues).\n reduce_retracing: When True, `tf.function` attempts to reduce the\n amount of retracing, for example by using more generic shapes. This\n can be controlled for user objects by customizing their associated\n `tf.types.experimental.TraceType`.\n experimental_implements: If provided, contains a name of a \"known\" function\n this implements. For example \"mycompany.my_recurrent_cell\".\n This is stored as an attribute in the inference function,\n which can then be detected when processing serialized function.\n See [standardizing composite ops](https://github.com/tensorflow/community/blob/master/rfcs/20190610-standardizing-composite_ops.md) # pylint: disable=line-too-long\n for details. For an example of utilizing this attribute see this\n [example](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/mlir/lite/transforms/prepare_composite_functions_tf.cc)\n The code above automatically detects and substitutes a function that\n implements \"embedded_matmul\" and allows TFLite to substitute its own\n implementations. For instance, a TensorFlow user can use this\n attribute to mark that their function also implements\n `embedded_matmul` (perhaps more efficiently!)\n by specifying it using this parameter:\n `@tf.function(experimental_implements=\"embedded_matmul\")`\n This can either be specified as just the string name of the function or\n a NameAttrList corresponding to a list of key-value attributes associated\n with the function name. The name of the function will be in the 'name'\n field of the NameAttrList. To define a formal TF op that this function\n implements, try the experimental [composite TF](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/mlir/tfr)\n project.\n experimental_autograph_options: Optional tuple of\n `tf.autograph.experimental.Feature` values.\n experimental_relax_shapes: Deprecated. Use `reduce_retracing`\n instead.\n experimental_compile: Deprecated alias to 'jit_compile'.\n experimental_follow_type_hints: When True, the function may use type\n annotations from `func` to optimize the tracing performance. 
For example,\n arguments annotated with `tf.Tensor` will automatically be converted\n to a Tensor.\n\nReturns:\n If `func` is not None, returns a `tf.types.experimental.GenericFunction`.\n If `func` is None, returns a decorator that, when invoked with a single\n `func` argument, returns a `tf.types.experimental.GenericFunction`.\n\nRaises:\n `ValueError` when attempting to use `jit_compile=True`, but XLA support is\n not available."}, {"name": "gather", "path": "./tf/gather.md", "desc": "Gather slices from params axis `axis` according to indices. (deprecated arguments", "type": "Functions", "docs": "Gather slices from params axis `axis` according to indices. (deprecated arguments)\n\nDeprecated: SOME ARGUMENTS ARE DEPRECATED: `(validate_indices)`. They will be removed in a future version.\nInstructions for updating:\nThe `validate_indices` argument has no effect. Indices are always validated on CPU and never validated on GPU.\n\nGather slices from `params` axis `axis` according to `indices`. `indices`\nmust be an integer tensor of any dimension (often 1-D).\n\n`Tensor.__getitem__` works for scalars, `tf.newaxis`, and\n[python slices](https://numpy.org/doc/stable/reference/arrays.indexing.html#basic-slicing-and-indexing)\n\n`tf.gather` extends indexing to handle tensors of indices.\n\nIn the simplest case it's identical to scalar indexing:\n\n>>> params = tf.constant(['p0', 'p1', 'p2', 'p3', 'p4', 'p5'])\n>>> params[3].numpy()\nb'p3'\n>>> tf.gather(params, 3).numpy()\nb'p3'\n\nThe most common case is to pass a single axis tensor of indices (this\ncan't be expressed as a python slice because the indices are not sequential):\n\n>>> indices = [2, 0, 2, 5]\n>>> tf.gather(params, indices).numpy()\narray([b'p2', b'p0', b'p2', b'p5'], dtype=object)\n\n
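One way to picture this (a small illustration of our own, not from the\noriginal docs): the result is the same as stacking one scalar lookup per\nindex:\n\n>>> tf.stack([tf.gather(params, i) for i in indices]).numpy()\narray([b'p2', b'p0', b'p2', b'p5'], dtype=object)\n\n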
The indices can have any shape. When `params` has 1 axis, the\noutput shape is equal to the input shape:\n\n>>> tf.gather(params, [[2, 0], [2, 5]]).numpy()\narray([[b'p2', b'p0'],\n [b'p2', b'p5']], dtype=object)\n\nThe `params` may also have any shape. `gather` can select slices\nacross any axis depending on the `axis` argument (which defaults to 0).\nBelow it is used to gather first rows, then columns from a matrix:\n\n>>> params = tf.constant([[0, 1.0, 2.0],\n... [10.0, 11.0, 12.0],\n... [20.0, 21.0, 22.0],\n... [30.0, 31.0, 32.0]])\n>>> tf.gather(params, indices=[3,1]).numpy()\narray([[30., 31., 32.],\n [10., 11., 12.]], dtype=float32)\n>>> tf.gather(params, indices=[2,1], axis=1).numpy()\narray([[ 2., 1.],\n [12., 11.],\n [22., 21.],\n [32., 31.]], dtype=float32)\n\nMore generally: the output has the same shape as the input, with the\nindexed axis replaced by the shape of the indices.\n\n>>> def result_shape(p_shape, i_shape, axis=0):\n... return p_shape[:axis] + i_shape + p_shape[axis+1:]\n>>>\n>>> result_shape([1, 2, 3], [], axis=1)\n[1, 3]\n>>> result_shape([1, 2, 3], [7], axis=1)\n[1, 7, 3]\n>>> result_shape([1, 2, 3], [7, 5], axis=1)\n[1, 7, 5, 3]\n\nHere are some examples:\n\n>>> params.shape.as_list()\n[4, 3]\n>>> indices = tf.constant([[0, 2]])\n>>> tf.gather(params, indices=indices, axis=0).shape.as_list()\n[1, 2, 3]\n>>> tf.gather(params, indices=indices, axis=1).shape.as_list()\n[4, 1, 2]\n\n>>> params = tf.random.normal(shape=(5, 6, 7, 8))\n>>> indices = tf.random.uniform(shape=(10, 11), maxval=7, dtype=tf.int32)\n>>> result = tf.gather(params, indices, axis=2)\n>>> result.shape.as_list()\n[5, 6, 10, 11, 8]\n\nThis is because each index takes a slice from `params`, and\nplaces it at the corresponding location in the output. For the above example:\n\n>>> # For any location in indices\n>>> a, b = 0, 1\n>>> tf.reduce_all(\n... # the corresponding slice of the result\n... result[:, :, a, b, :] ==\n... # is equal to the slice of `params` along `axis` at the index.\n... params[:, :, indices[a, b], :]\n... ).numpy()\nTrue\n\n### Batching:\n\nThe `batch_dims` argument lets you gather different items from each element\nof a batch.\n\nUsing `batch_dims=1` is equivalent to having an outer loop over the first\naxis of `params` and `indices`:\n\n>>> params = tf.constant([\n... [0, 0, 1, 0, 2],\n... [3, 0, 0, 0, 4],\n... [0, 5, 0, 6, 0]])\n>>> indices = tf.constant([\n... [2, 4],\n... [0, 4],\n... [1, 3]])\n\n>>> tf.gather(params, indices, axis=1, batch_dims=1).numpy()\narray([[1, 2],\n [3, 4],\n [5, 6]], dtype=int32)\n\nThis is equivalent to:\n\n>>> def manually_batched_gather(params, indices, axis):\n... batch_dims=1\n... result = []\n... for p,i in zip(params, indices):\n... r = tf.gather(p, i, axis=axis-batch_dims)\n... result.append(r)\n... return tf.stack(result)\n>>> manually_batched_gather(params, indices, axis=1).numpy()\narray([[1, 2],\n [3, 4],\n [5, 6]], dtype=int32)\n\nHigher values of `batch_dims` are equivalent to multiple nested loops over\nthe outer axes of `params` and `indices`. So the overall shape function is\n\n>>> def batched_result_shape(p_shape, i_shape, axis=0, batch_dims=0):\n... return p_shape[:axis] + i_shape[batch_dims:] + p_shape[axis+1:]\n>>>\n>>> batched_result_shape(\n... p_shape=params.shape.as_list(),\n... i_shape=indices.shape.as_list(),\n... axis=1,\n... batch_dims=1)\n[3, 2]\n\n>>> tf.gather(params, indices, axis=1, batch_dims=1).shape.as_list()\n[3, 2]\n\nThis comes up naturally if you need to use the indices of an operation like\n`tf.argsort` or `tf.math.top_k`, where the last dimension of the indices\nindexes into the last dimension of the input at the corresponding location.\nIn this case you can use `tf.gather(values, indices, batch_dims=-1)`.\n\n
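For instance (a sketch of our own, not from the original docs), gathering\nthe values selected by `tf.math.top_k` reproduces its `values` output:\n\n>>> scores = tf.constant([[10., 30., 20.],\n...                       [40., 60., 50.]])\n>>> top = tf.math.top_k(scores, k=2)\n>>> gathered = tf.gather(scores, top.indices, batch_dims=-1)\n>>> tf.reduce_all(gathered == top.values).numpy()\nTrue\n\n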
See also:\n\n* `tf.Tensor.__getitem__`: The direct tensor index operation (`t[]`), handles\n scalars and python-slices `tensor[..., 7, 1:-1]`\n* `tf.scatter`: A collection of operations similar to `__setitem__`\n (`t[i] = x`)\n* `tf.gather_nd`: An operation similar to `tf.gather` but gathers across\n multiple axes at once (it can gather elements of a matrix instead of rows\n or columns)\n* `tf.boolean_mask`, `tf.where`: Binary indexing.\n* `tf.slice` and `tf.strided_slice`: For lower level access to the\n implementation of `__getitem__`'s python-slice handling (`t[1:-1:2]`)\n\nArgs:\n params: The `Tensor` from which to gather values. Must be at least rank\n `axis + 1`.\n indices: The index `Tensor`. Must be one of the following types: `int32`,\n `int64`. The values must be in range `[0, params.shape[axis])`.\n validate_indices: Deprecated, does nothing. Indices are always validated on\n CPU, never validated on GPU.\n\n Caution: On CPU, if an out of bound index is found, an error is raised.\n On GPU, if an out of bound index is found, a 0 is stored in the\n corresponding output value.\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The\n `axis` in `params` to gather `indices` from. Must be greater than or equal\n to `batch_dims`. Defaults to the first non-batch dimension. Supports\n negative indexes.\n batch_dims: An `integer`. The number of batch dimensions. Must be less\n than or equal to `rank(indices)`.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor`. Has the same type as `params`."}, {"name": "gather_nd", "path": "./tf/gather_nd.md", "desc": "Gather slices from `params` into a Tensor with shape specified by `indices`.", "type": "Functions", "docs": "Gather slices from `params` into a Tensor with shape specified by `indices`.\n\n `indices` is a `Tensor` of indices into `params`. The index vectors are\n arranged along the last axis of `indices`.\n\n This is similar to `tf.gather`, in which `indices` defines slices into the\n first dimension of `params`. In `tf.gather_nd`, `indices` defines slices into the\n first `N` dimensions of `params`, where `N = indices.shape[-1]`.\n\n Caution: On CPU, if an out of bound index is found, an error is returned.\n On GPU, if an out of bound index is found, a 0 is stored in the\n corresponding output value.\n\n ## Gathering scalars\n\n In the simplest case the vectors in `indices` index the full rank of `params`:\n\n >>> tf.gather_nd(\n ... indices=[[0, 0],\n ... [1, 1]],\n ... params = [['a', 'b'],\n ... 
['c', 'd']]).numpy()\n array([b'a', b'd'], dtype=object)\n\n In this case the result has one axis fewer than `indices`, and each index\n vector is replaced by the scalar indexed from `params`.\n\n In this case the shape relationship is:\n\n ```\n index_depth = indices.shape[-1]\n assert index_depth == params.shape.rank\n result_shape = indices.shape[:-1]\n ```\n\n If `indices` has a rank of `K`, it is helpful to think of `indices` as a\n (K-1)-dimensional tensor of indices into `params`.\n\n ## Gathering slices\n\n If the index vectors do not index the full rank of `params` then each location\n in the result contains a slice of params. This example collects rows from a\n matrix:\n\n >>> tf.gather_nd(\n ... indices = [[1],\n ... [0]],\n ... params = [['a', 'b', 'c'],\n ... ['d', 'e', 'f']]).numpy()\n array([[b'd', b'e', b'f'],\n [b'a', b'b', b'c']], dtype=object)\n\n Here `indices` contains `[2]` index vectors, each with a length of `1`.\n The index vectors each refer to rows of the `params` matrix. Each\n row has a shape of `[3]`, so the output shape is `[2, 3]`.\n\n In this case, the relationship between the shapes is:\n\n ```\n index_depth = indices.shape[-1]\n outer_shape = indices.shape[:-1]\n assert index_depth <= params.shape.rank\n inner_shape = params.shape[index_depth:]\n output_shape = outer_shape + inner_shape\n ```\n\n It is helpful to think of the results in this case as tensors-of-tensors:\n the shape of the outer tensor is set by the leading dimensions of `indices`,\n while the shape of the inner tensors is the shape of a single slice.\n\n ## Batches\n\n Additionally both `params` and `indices` can have `M` leading batch\n dimensions that exactly match. In this case `batch_dims` must be set to `M`.\n\n For example, to collect one row from each of a batch of matrices you could\n set the leading elements of the index vectors to be their location in the\n batch:\n\n >>> tf.gather_nd(\n ... indices = [[0, 1],\n ... [1, 0],\n ... [2, 4],\n ... [3, 2],\n ... [4, 1]],\n ... params=tf.zeros([5, 7, 3])).shape.as_list()\n [5, 3]\n\n The `batch_dims` argument lets you omit those leading location dimensions\n from the index:\n\n >>> tf.gather_nd(\n ... batch_dims=1,\n ... indices = [[1],\n ... [0],\n ... [4],\n ... [2],\n ... [1]],\n ... params=tf.zeros([5, 7, 3])).shape.as_list()\n [5, 3]\n\n This is equivalent to calling a separate `gather_nd` for each location in the\n batch dimensions.\n\n\n >>> params=tf.zeros([5, 7, 3])\n >>> indices=tf.zeros([5, 1])\n >>> batch_dims = 1\n >>>\n >>> index_depth = indices.shape[-1]\n >>> batch_shape = indices.shape[:batch_dims]\n >>> assert params.shape[:batch_dims] == batch_shape\n >>> outer_shape = indices.shape[batch_dims:-1]\n >>> assert index_depth <= params.shape.rank\n >>> inner_shape = params.shape[batch_dims + index_depth:]\n >>> output_shape = batch_shape + outer_shape + inner_shape\n >>> output_shape.as_list()\n [5, 3]\n\n ### More examples\n\n Indexing into a 3-tensor:\n\n >>> tf.gather_nd(\n ... indices = [[1]],\n ... params = [[['a0', 'b0'], ['c0', 'd0']],\n ... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\n array([[[b'a1', b'b1'],\n [b'c1', b'd1']]], dtype=object)\n\n\n\n >>> tf.gather_nd(\n ... indices = [[0, 1], [1, 0]],\n ... params = [[['a0', 'b0'], ['c0', 'd0']],\n ... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\n array([[b'c0', b'd0'],\n [b'a1', b'b1']], dtype=object)\n\n\n >>> tf.gather_nd(\n ... indices = [[0, 0, 1], [1, 0, 1]],\n ... params = [[['a0', 'b0'], ['c0', 'd0']],\n ... 
[['a1', 'b1'], ['c1', 'd1']]]).numpy()\n array([b'b0', b'b1'], dtype=object)\n\n The examples below are for the case when only indices have leading extra\n dimensions. If both 'params' and 'indices' have leading batch dimensions, use\n the 'batch_dims' parameter to run gather_nd in batch mode.\n\n Batched indexing into a matrix:\n\n >>> tf.gather_nd(\n ... indices = [[[0, 0]], [[0, 1]]],\n ... params = [['a', 'b'], ['c', 'd']]).numpy()\n array([[b'a'],\n [b'b']], dtype=object)\n\n\n\n Batched slice indexing into a matrix:\n\n >>> tf.gather_nd(\n ... indices = [[[1]], [[0]]],\n ... params = [['a', 'b'], ['c', 'd']]).numpy()\n array([[[b'c', b'd']],\n [[b'a', b'b']]], dtype=object)\n\n\n Batched indexing into a 3-tensor:\n\n >>> tf.gather_nd(\n ... indices = [[[1]], [[0]]],\n ... params = [[['a0', 'b0'], ['c0', 'd0']],\n ... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\n array([[[[b'a1', b'b1'],\n [b'c1', b'd1']]],\n [[[b'a0', b'b0'],\n [b'c0', b'd0']]]], dtype=object)\n\n\n >>> tf.gather_nd(\n ... indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]],\n ... params = [[['a0', 'b0'], ['c0', 'd0']],\n ... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\n array([[[b'c0', b'd0'],\n [b'a1', b'b1']],\n [[b'a0', b'b0'],\n [b'c1', b'd1']]], dtype=object)\n\n >>> tf.gather_nd(\n ... indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]],\n ... params = [[['a0', 'b0'], ['c0', 'd0']],\n ... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\n array([[b'b0', b'b1'],\n [b'd0', b'c1']], dtype=object)\n\n\n Examples with batched 'params' and 'indices':\n\n >>> tf.gather_nd(\n ... batch_dims = 1,\n ... indices = [[1],\n ... [0]],\n ... params = [[['a0', 'b0'],\n ... ['c0', 'd0']],\n ... [['a1', 'b1'],\n ... ['c1', 'd1']]]).numpy()\n array([[b'c0', b'd0'],\n [b'a1', b'b1']], dtype=object)\n\n\n >>> tf.gather_nd(\n ... batch_dims = 1,\n ... indices = [[[1]], [[0]]],\n ... params = [[['a0', 'b0'], ['c0', 'd0']],\n ... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\n array([[[b'c0', b'd0']],\n [[b'a1', b'b1']]], dtype=object)\n\n >>> tf.gather_nd(\n ... batch_dims = 1,\n ... indices = [[[1, 0]], [[0, 1]]],\n ... params = [[['a0', 'b0'], ['c0', 'd0']],\n ... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\n array([[b'c0'],\n [b'b1']], dtype=object)\n\n\n See also `tf.gather`.\n\n Args:\n params: A `Tensor`. The tensor from which to gather values.\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Index tensor.\n name: A name for the operation (optional).\n batch_dims: An integer or a scalar 'Tensor'. The number of batch dimensions.\n\n Returns:\n A `Tensor`. Has the same type as `params`.\n "}, {"name": "get_current_name_scope", "path": "./tf/get_current_name_scope.md", "desc": "Returns current full name scope specified by
tf.name_scope(...", "type": "Functions", "docs": "Returns current full name scope specified by `tf.name_scope(...)`s.\n\n For example,\n ```python\n with tf.name_scope(\"outer\"):\n tf.get_current_name_scope() # \"outer\"\n\n with tf.name_scope(\"inner\"):\n tf.get_current_name_scope() # \"outer/inner\"\n ```\n\n In other words, `tf.get_current_name_scope()` returns the op name prefix that\n will be prepended to the name of any op created at that point.\n\n Note that `@tf.function` resets the name scope stack, as shown below.\n\n ```python\n with tf.name_scope(\"outer\"):\n\n @tf.function\n def foo(x):\n with tf.name_scope(\"inner\"):\n return tf.add(x, x) # Op name is \"inner/Add\", not \"outer/inner/Add\"\n ```\n "}, {"name": "get_logger", "path": "./tf/get_logger.md", "desc": "Return TF logger instance.", "type": "Functions", "docs": "Return TF logger instance."}, {"name": "get_static_value", "path": "./tf/get_static_value.md", "desc": "Returns the constant value of the given tensor, if efficiently calculable.", "type": "Functions", "docs": "Returns the constant value of the given tensor, if efficiently calculable.\n\n This function attempts to partially evaluate the given tensor, and\n returns its value as a numpy ndarray if this succeeds.\n\n Example usage:\n\n >>> a = tf.constant(10)\n >>> tf.get_static_value(a)\n 10\n >>> b = tf.constant(20)\n >>> tf.get_static_value(tf.add(a, b))\n 30\n\n >>> # `tf.Variable` is not supported.\n >>> c = tf.Variable(30)\n >>> print(tf.get_static_value(c))\n None\n\n Using the `partial` option is most relevant when calling `get_static_value`\n inside a `tf.function`. Setting it to `True` will return the results, with\n `None` for any values that cannot be evaluated. For example:\n\n ```python\n class Foo(object):\n def __init__(self):\n self.a = tf.Variable(1)\n self.b = tf.constant(2)\n\n @tf.function\n def bar(self, partial):\n packed = tf.raw_ops.Pack(values=[self.a, self.b])\n static_val = tf.get_static_value(packed, partial=partial)\n tf.print(static_val)\n\n f = Foo()\n f.bar(partial=True) # `array([None, array(2, dtype=int32)], dtype=object)`\n f.bar(partial=False) # `None`\n ```\n\n Compatibility(V1): If `constant_value(tensor)` returns a non-`None` result, it\n will no longer be possible to feed a different value for `tensor`. This allows\n the result of this function to influence the graph that is constructed, and\n permits static shape optimizations.\n\n Args:\n tensor: The Tensor to be evaluated.\n partial: If True, the returned numpy array is allowed to have partially\n evaluated values. 
Values that can't be evaluated will be None.\n\n Returns:\n A numpy ndarray containing the constant value of the given `tensor`,\n or None if it cannot be calculated.\n\n Raises:\n TypeError: if tensor is not an ops.Tensor.\n "}, {"name": "grad_pass_through", "path": "./tf/grad_pass_through.md", "desc": "Creates a grad-pass-through op with the forward behavior provided in f.", "type": "Functions", "docs": "Creates a grad-pass-through op with the forward behavior provided in f.\n\n Use this function to wrap any op, maintaining its behavior in the forward\n pass, but replacing the original op in the backward graph with an identity.\n For example:\n\n ```python\n x = tf.Variable(1.0, name=\"x\")\n z = tf.Variable(3.0, name=\"z\")\n\n with tf.GradientTape() as tape:\n # y will evaluate to 9.0\n y = tf.grad_pass_through(x.assign)(z**2)\n # grads will evaluate to 6.0\n grads = tape.gradient(y, z)\n ```\n\n Another example is a 'differentiable' moving average approximation, where\n gradients are allowed to flow into the last value fed to the moving average,\n but the moving average is still used for the forward pass:\n\n ```python\n x = ... # Some scalar value\n # A moving average object; we don't need to know how this is implemented\n moving_average = MovingAverage()\n with tf.GradientTape() as tape:\n # mavg_x will evaluate to the current running average value\n mavg_x = tf.grad_pass_through(moving_average)(x)\n grads = tape.gradient(mavg_x, x) # grads will evaluate to 1.0\n ```\n\n Args:\n f: function `f(*x)` that returns a `Tensor` or nested structure of `Tensor`\n outputs.\n\n Returns:\n A function `h(x)` which returns the same values as `f(x)` and whose\n gradients are the same as those of an identity function.\n "}, {"name": "gradients", "path": "./tf/gradients.md", "desc": "Constructs symbolic derivatives of sum of `ys` w.r.t. x in `xs`.", "type": "Functions", "docs": "Constructs symbolic derivatives of sum of `ys` w.r.t. x in `xs`.\n\n `tf.gradients` is only valid in a graph context. In particular,\n it is valid in the context of a `tf.function` wrapper, where code\n is executing as a graph.\n\n `ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys`\n is a list of `Tensor`, holding the gradients received by the\n `ys`. The list must be the same length as `ys`.\n\n `gradients()` adds ops to the graph to output the derivatives of `ys` with\n respect to `xs`. It returns a list of `Tensor` of length `len(xs)` where\n each tensor is the `sum(dy/dx)` for y in `ys` and for x in `xs`.\n\n `grad_ys` is a list of tensors of the same length as `ys` that holds\n the initial gradients for each y in `ys`. When `grad_ys` is None,\n we fill in a tensor of '1's of the shape of y for each y in `ys`. A\n user can provide their own initial `grad_ys` to compute the\n derivatives using a different initial gradient for each y (e.g., if\n one wanted to weight the gradient differently for each value in\n each y).\n\n 
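For instance (our own sketch, not from the original docs), a custom\n `grad_ys` weights each component of the incoming gradient:\n\n >>> @tf.function\n ... def weighted():\n ...   x = tf.constant([1.0, 2.0])\n ...   y = x * x   # dy/dx = 2*x = [2.0, 4.0]\n ...   return tf.gradients(y, [x], grad_ys=[tf.constant([3.0, 4.0])])\n >>> weighted()\n [<tf.Tensor: shape=(2,), dtype=float32, numpy=array([ 6., 16.], dtype=float32)>]\n\n 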
`stop_gradients` is a `Tensor` or a list of tensors to be considered constant\n with respect to all `xs`. These tensors will not be backpropagated through,\n as though they had been explicitly disconnected using `stop_gradient`. Among\n other things, this allows computation of partial derivatives as opposed to\n total derivatives. For example:\n\n >>> @tf.function\n ... def example():\n ... a = tf.constant(0.)\n ... b = 2 * a\n ... return tf.gradients(a + b, [a, b], stop_gradients=[a, b])\n >>> example()\n [<tf.Tensor: shape=(), dtype=float32, numpy=1.0>,\n <tf.Tensor: shape=(), dtype=float32, numpy=1.0>]\n\n Here the partial derivatives `g` evaluate to `[1.0, 1.0]`, compared to the\n total derivatives `tf.gradients(a + b, [a, b])`, which take into account the\n influence of `a` on `b` and evaluate to `[3.0, 1.0]`. Note that the above is\n equivalent to:\n\n >>> @tf.function\n ... def example():\n ... a = tf.stop_gradient(tf.constant(0.))\n ... b = tf.stop_gradient(2 * a)\n ... return tf.gradients(a + b, [a, b])\n >>> example()\n [<tf.Tensor: shape=(), dtype=float32, numpy=1.0>,\n <tf.Tensor: shape=(), dtype=float32, numpy=1.0>]\n\n `stop_gradients` provides a way of stopping gradient after the graph has\n already been constructed, as compared to `tf.stop_gradient` which is used\n during graph construction. When the two approaches are combined,\n backpropagation stops at both `tf.stop_gradient` nodes and nodes in\n `stop_gradients`, whichever is encountered first.\n\n All integer tensors are considered constant with respect to all `xs`, as if\n they were included in `stop_gradients`.\n\n `unconnected_gradients` determines the value returned for each x in xs if it\n is unconnected in the graph to ys. By default this is None to safeguard\n against errors. Mathematically these gradients are zero which can be requested\n using the `'zero'` option. `tf.UnconnectedGradients` provides the\n following options and behaviors:\n\n >>> @tf.function\n ... def example(use_zero):\n ... a = tf.ones([1, 2])\n ... b = tf.ones([3, 1])\n ... if use_zero:\n ... return tf.gradients([b], [a], unconnected_gradients='zero')\n ... else:\n ... return tf.gradients([b], [a], unconnected_gradients='none')\n >>> example(False)\n [None]\n >>> example(True)\n [<tf.Tensor: shape=(1, 2), dtype=float32, numpy=array([[0., 0.]], dtype=float32)>]\n\n Consider a practical example that comes up during the backpropagation\n phase. This function is used to evaluate the derivatives of the cost function\n with respect to weights `Ws` and biases `bs`. The sample implementation below\n illustrates what it is actually used for:\n\n >>> @tf.function\n ... def example():\n ... Ws = tf.constant(0.)\n ... bs = 2 * Ws\n ... cost = Ws + bs # This is just an example. Please ignore the formulas.\n ... g = tf.gradients(cost, [Ws, bs])\n ... dCost_dW, dCost_db = g\n ... return dCost_dW, dCost_db\n >>> example()\n (<tf.Tensor: shape=(), dtype=float32, numpy=3.0>,\n <tf.Tensor: shape=(), dtype=float32, numpy=1.0>)\n\n Args:\n ys: A `Tensor` or list of tensors to be differentiated.\n xs: A `Tensor` or list of tensors to be used for differentiation.\n grad_ys: Optional. A `Tensor` or list of tensors the same size as\n `ys` and holding the gradients computed for each y in `ys`.\n name: Optional name to use for grouping all the gradient ops together.\n Defaults to 'gradients'.\n gate_gradients: If True, add a tuple around the gradients returned\n for an operation. This avoids some race conditions.\n aggregation_method: Specifies the method used to combine gradient terms.\n Accepted values are constants defined in the class `AggregationMethod`.\n stop_gradients: Optional. A `Tensor` or list of tensors not to differentiate\n through.\n unconnected_gradients: Optional. Specifies the gradient value returned when\n the given input tensors are unconnected. 
Accepted values are constants\n defined in the class `tf.UnconnectedGradients` and the default value is\n `none`.\n\n Returns:\n A list of `Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)`\n for y in `ys` and for x in `xs`.\n\n Raises:\n LookupError: if one of the operations between `x` and `y` does not\n have a registered gradient function.\n ValueError: if the arguments are invalid.\n RuntimeError: if called in Eager mode.\n\n "}, {"name": "greater", "path": "./tf/math/greater.md", "desc": "Returns the truth value of (x > y", "type": "Functions", "docs": "Returns the truth value of (x > y) element-wise.\n\n *NOTE*: `math.greater` supports broadcasting. More about broadcasting\n [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\n Example:\n\n ```python\n x = tf.constant([5, 4, 6])\n y = tf.constant([5, 2, 5])\n tf.math.greater(x, y) ==> [False, True, True]\n\n x = tf.constant([5, 4, 6])\n y = tf.constant([5])\n tf.math.greater(x, y) ==> [False, False, True]\n ```\n\n Args:\n x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.\n y: A `Tensor`. Must have the same type as `x`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `bool`.\n "}, {"name": "greater_equal", "path": "./tf/math/greater_equal.md", "desc": "Returns the truth value of (x >= y", "type": "Functions", "docs": "Returns the truth value of (x >= y) element-wise.\n\n *NOTE*: `math.greater_equal` supports broadcasting. More about broadcasting\n [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\n Example:\n\n ```python\n x = tf.constant([5, 4, 6, 7])\n y = tf.constant([5, 2, 5, 10])\n tf.math.greater_equal(x, y) ==> [True, True, True, False]\n\n x = tf.constant([5, 4, 6, 7])\n y = tf.constant([5])\n tf.math.greater_equal(x, y) ==> [True, False, True, True]\n ```\n\n Args:\n x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.\n y: A `Tensor`. Must have the same type as `x`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `bool`.\n "}, {"name": "group", "path": "./tf/group.md", "desc": "Create an op that groups multiple operations.", "type": "Functions", "docs": "Create an op that groups multiple operations.\n\n When this op finishes, all ops in `inputs` have finished. This op has no\n output.\n\n Note: *In TensorFlow 2 with eager and/or Autograph, you should not require\n this method, as ops execute in the expected order thanks to automatic control\n dependencies.* Only use `tf.group` when working with v1\n `tf.Graph` code.\n\n When operating in a v1-style graph context, ops are not executed in the same\n order as specified in the code; TensorFlow will attempt to execute ops in\n parallel or in an order convenient to the result it is computing. `tf.group`\n allows you to request that one or more results finish before execution\n continues.\n\n `tf.group` creates a single op (of type `NoOp`), and then adds appropriate\n control dependencies. 
Thus, `c = tf.group(a, b)` will compute the same graph\n as this:\n\n with tf.control_dependencies([a, b]):\n c = tf.no_op()\n\n See also `tf.tuple` and\n `tf.control_dependencies`.\n\n Args:\n *inputs: Zero or more tensors to group.\n name: A name for this operation (optional).\n\n Returns:\n An Operation that executes all its inputs.\n\n Raises:\n ValueError: If an unknown keyword argument is provided.\n "}, {"name": "guarantee_const", "path": "./tf/guarantee_const.md", "desc": "Promise to the TF runtime that the input tensor is a constant. (deprecated", "type": "Functions", "docs": "Promise to the TF runtime that the input tensor is a constant. (deprecated)\n\nDeprecated: THIS FUNCTION IS DEPRECATED. It will be removed in a future version.\nInstructions for updating:\nNot for public use.\n\nThe runtime is then free to make optimizations based on this.\n\nReturns the input tensor without modification.\n\nArgs:\n input: A `Tensor`.\n name: A name for this operation.\n\nReturns:\n A `Tensor`. Has the same dtype as `input`."}, {"name": "hessians", "path": "./tf/hessians.md", "desc": "Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.", "type": "Functions", "docs": "Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.\n\n `hessians()` adds ops to the graph to output the Hessian matrix of `ys`\n with respect to `xs`. It returns a list of `Tensor` of length `len(xs)`\n where each tensor is the Hessian of `sum(ys)`.\n\n The Hessian is a matrix of second-order partial derivatives of a scalar\n tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).\n\n Args:\n ys: A `Tensor` or list of tensors to be differentiated.\n xs: A `Tensor` or list of tensors to be used for differentiation.\n gate_gradients: See `gradients()` documentation for details.\n aggregation_method: See `gradients()` documentation for details.\n name: Optional name to use for grouping all the gradient ops together.\n defaults to 'hessians'.\n\n Returns:\n A list of Hessian matrices of `sum(ys)` for each `x` in `xs`.\n\n Raises:\n LookupError: if one of the operations between `xs` and `ys` does not\n have a registered gradient function.\n "}, {"name": "histogram_fixed_width", "path": "./tf/histogram_fixed_width.md", "desc": "Return histogram of values.", "type": "Functions", "docs": "Return histogram of values.\n\n Given the tensor `values`, this operation returns a rank 1 histogram counting\n the number of entries in `values` that fell into every bin. The bins are\n equal width and determined by the arguments `value_range` and `nbins`.\n\n Args:\n values: Numeric `Tensor`.\n value_range: Shape [2] `Tensor` of same `dtype` as `values`.\n values <= value_range[0] will be mapped to hist[0],\n values >= value_range[1] will be mapped to hist[-1].\n nbins: Scalar `int32 Tensor`. 
Number of histogram bins.\n dtype: dtype for returned histogram.\n name: A name for this operation (defaults to 'histogram_fixed_width').\n\n Returns:\n A 1-D `Tensor` holding histogram of values.\n\n Raises:\n TypeError: If any unsupported dtype is provided.\n tf.errors.InvalidArgumentError: If value_range does not\n satisfy value_range[0] < value_range[1].\n\n Examples:\n\n >>> # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)\n ...\n >>> nbins = 5\n >>> value_range = [0.0, 5.0]\n >>> new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]\n >>> hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)\n >>> hist.numpy()\n array([2, 1, 1, 0, 2], dtype=int32)\n "}, {"name": "histogram_fixed_width_bins", "path": "./tf/histogram_fixed_width_bins.md", "desc": "Bins the given values for use in a histogram.", "type": "Functions", "docs": "Bins the given values for use in a histogram.\n\n Given the tensor `values`, this operation returns a rank 1 `Tensor`\n representing the indices of a histogram into which each element\n of `values` would be binned. The bins are equal width and\n determined by the arguments `value_range` and `nbins`.\n\n Args:\n values: Numeric `Tensor`.\n value_range: Shape [2] `Tensor` of same `dtype` as `values`.\n values <= value_range[0] will be mapped to hist[0],\n values >= value_range[1] will be mapped to hist[-1].\n nbins: Scalar `int32 Tensor`. Number of histogram bins.\n dtype: dtype for returned histogram.\n name: A name for this operation (defaults to 'histogram_fixed_width').\n\n Returns:\n A `Tensor` holding the indices of the binned values whose shape matches\n `values`.\n\n Raises:\n TypeError: If any unsupported dtype is provided.\n tf.errors.InvalidArgumentError: If value_range does not\n satisfy value_range[0] < value_range[1].\n\n Examples:\n\n >>> # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)\n ...\n >>> nbins = 5\n >>> value_range = [0.0, 5.0]\n >>> new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]\n >>> indices = tf.histogram_fixed_width_bins(new_values, value_range, nbins=5)\n >>> indices.numpy()\n array([0, 0, 1, 2, 4, 4], dtype=int32)\n "}, {"name": "identity", "path": "./tf/identity.md", "desc": "Return a Tensor with the same shape and contents as input.", "type": "Functions", "docs": "Return a Tensor with the same shape and contents as input.\n\n The return value is not the same Tensor as the original, but contains the same\n values. This operation is fast when used on the same device.\n\n For example:\n\n >>> a = tf.constant([0.78])\n >>> a_identity = tf.identity(a)\n >>> a.numpy()\n array([0.78], dtype=float32)\n >>> a_identity.numpy()\n array([0.78], dtype=float32)\n\n Calling `tf.identity` on a variable will make a Tensor that represents the\n value of that variable at the time it is called. This is equivalent to calling\n `.read_value()`.\n\n >>> a = tf.Variable(5)\n >>> a_identity = tf.identity(a)\n >>> a.assign_add(1)\n \n >>> a.numpy()\n 6\n >>> a_identity.numpy()\n 5\n\n Args:\n input: A `Tensor`, a `Variable`, a `CompositeTensor` or anything that can be\n converted to a tensor using `tf.convert_to_tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or CompositeTensor. 
Has the same type and contents as `input`.\n "}, {"name": "identity_n", "path": "./tf/identity_n.md", "desc": "Returns a list of tensors with the same shapes and contents as the input", "type": "Functions", "docs": "Returns a list of tensors with the same shapes and contents as the input\n\n tensors.\n\n This op can be used to override the gradient for complicated functions. For\n example, suppose y = f(x) and we wish to apply a custom function g for backprop\n such that dx = g(dy). In Python,\n\n ```python\n with tf.get_default_graph().gradient_override_map(\n {'IdentityN': 'OverrideGradientWithG'}):\n y, _ = identity_n([f(x), x])\n\n @tf.RegisterGradient('OverrideGradientWithG')\n def ApplyG(op, dy, _):\n return [None, g(dy)] # Do not backprop to f(x).\n ```\n\n Args:\n input: A list of `Tensor` objects.\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects. Has the same type as `input`.\n "}, {"name": "import_graph_def", "path": "./tf/graph_util/import_graph_def.md", "desc": "Imports the graph from `graph_def` into the current default `Graph`. (deprecated arguments", "type": "Functions", "docs": "Imports the graph from `graph_def` into the current default `Graph`. (deprecated arguments)\n\nDeprecated: SOME ARGUMENTS ARE DEPRECATED: `(op_dict)`. They will be removed in a future version.\nInstructions for updating:\nPlease file an issue at https://github.com/tensorflow/tensorflow/issues if you depend on this feature.\n\nThis function provides a way to import a serialized TensorFlow\n[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)\nprotocol buffer, and extract individual objects in the `GraphDef` as\n`tf.Tensor` and `tf.Operation` objects. Once extracted,\nthese objects are placed into the current default `Graph`. See\n`tf.Graph.as_graph_def` for a way to create a `GraphDef`\nproto.\n\nArgs:\n graph_def: A `GraphDef` proto containing operations to be imported into\n the default graph.\n input_map: A dictionary mapping input names (as strings) in `graph_def`\n to `Tensor` objects. The values of the named input tensors in the\n imported graph will be re-mapped to the respective `Tensor` values.\n return_elements: A list of strings containing operation names in\n `graph_def` that will be returned as `Operation` objects; and/or\n tensor names in `graph_def` that will be returned as `Tensor` objects.\n name: (Optional.) A prefix that will be prepended to the names in\n `graph_def`. Note that this does not apply to imported function names.\n Defaults to `\"import\"`.\n op_dict: (Optional.) Deprecated, do not use.\n producer_op_list: (Optional.) An `OpList` proto with the (possibly stripped)\n list of `OpDef`s used by the producer of the graph. If provided,\n unrecognized attrs for ops in `graph_def` that have their default value\n according to `producer_op_list` will be removed. 
This will allow some more\n `GraphDef`s produced by later binaries to be accepted by earlier binaries.\n\nReturns:\n A list of `Operation` and/or `Tensor` objects from the imported graph,\n corresponding to the names in `return_elements`,\n and None if `return_elements` is None.\n\nRaises:\n TypeError: If `graph_def` is not a `GraphDef` proto,\n `input_map` is not a dictionary mapping strings to `Tensor` objects,\n or `return_elements` is not a list of strings.\n ValueError: If `input_map`, or `return_elements` contains names that\n do not appear in `graph_def`, or `graph_def` is not well-formed (e.g.\n it refers to an unknown tensor)."}, {"name": "init_scope", "path": "./tf/init_scope.md", "desc": "A context manager that lifts ops out of control-flow scopes and function-building graphs.", "type": "Functions", "docs": "A context manager that lifts ops out of control-flow scopes and function-building graphs.\n\n There is often a need to lift variable initialization ops out of control-flow\n scopes, function-building graphs, and gradient tapes. Entering an\n `init_scope` is a mechanism for satisfying these desiderata. In particular,\n entering an `init_scope` has three effects:\n\n (1) All control dependencies are cleared the moment the scope is entered;\n this is equivalent to entering the context manager returned from\n `control_dependencies(None)`, which has the side-effect of exiting\n control-flow scopes like `tf.cond` and `tf.while_loop`.\n\n (2) All operations that are created while the scope is active are lifted\n into the lowest context on the `context_stack` that is not building a\n graph function. Here, a context is defined as either a graph or an eager\n context. Every context switch, i.e., every installation of a graph as\n the default graph and every switch into eager mode, is logged in a\n thread-local stack called `context_switches`; the log entry for a\n context switch is popped from the stack when the context is exited.\n Entering an `init_scope` is equivalent to crawling up\n `context_switches`, finding the first context that is not building a\n graph function, and entering it. A caveat is that if graph mode is\n enabled but the default graph stack is empty, then entering an\n `init_scope` will simply install a fresh graph as the default one.\n\n (3) The gradient tape is paused while the scope is active.\n\n When eager execution is enabled, code inside an init_scope block runs with\n eager execution enabled even when tracing a `tf.function`. For example:\n\n ```python\n tf.compat.v1.enable_eager_execution()\n\n @tf.function\n def func():\n # A function constructs TensorFlow graphs,\n # it does not execute eagerly.\n assert not tf.executing_eagerly()\n with tf.init_scope():\n # Initialization runs with eager execution enabled\n assert tf.executing_eagerly()\n ```\n\n Raises:\n RuntimeError: if graph state is incompatible with this initialization.\n "}, {"name": "inside_function", "path": "./tf/inside_function.md", "desc": "Indicates whether the caller code is executing inside a tf.function.", "type": "Functions", "docs": "Indicates whether the caller code is executing inside a `tf.function`.\n\n Returns:\n Boolean, True if the caller code is executing inside a `tf.function`\n rather than eagerly.\n\n Example:\n\n >>> tf.inside_function()\n False\n >>> @tf.function\n ... def f():\n ... 
print(tf.inside_function())\n >>> f()\n True\n "}, {"name": "is_tensor", "path": "./tf/is_tensor.md", "desc": "Checks whether `x` is a TF-native type that can be passed to many TF ops.", "type": "Functions", "docs": "Checks whether `x` is a TF-native type that can be passed to many TF ops.\n\n Use `is_tensor` to differentiate types that can be ingested by TensorFlow ops\n without any conversion (e.g., `tf.Tensor`, `tf.SparseTensor`, and\n `tf.RaggedTensor`) from types that need to be converted into tensors before\n they are ingested (e.g., numpy `ndarray` and Python scalars).\n\n For example, in the following code block:\n\n ```python\n if not tf.is_tensor(t):\n t = tf.convert_to_tensor(t)\n return t.shape, t.dtype\n ```\n\n we check to make sure that `t` is a tensor (and convert it if not) before\n accessing its `shape` and `dtype`. (But note that not all TensorFlow native\n types have shapes or dtypes; `tf.data.Dataset` is an example of a TensorFlow\n native type that has neither shape nor dtype.)\n\n 
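A quick check (our own example, not from the original docs; assumes\n `import numpy as np`):\n\n >>> tf.is_tensor(tf.constant([1, 2]))\n True\n >>> tf.is_tensor(np.array([1, 2]))\n False\n >>> tf.is_tensor([1, 2])\n False\n\n 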
Args:\n x: A python object to check.\n\n Returns:\n `True` if `x` is a TensorFlow-native type.\n "}, {"name": "less", "path": "./tf/math/less.md", "desc": "Returns the truth value of (x < y", "type": "Functions", "docs": "Returns the truth value of (x < y) element-wise.\n\n *NOTE*: `math.less` supports broadcasting. More about broadcasting\n [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\n Example:\n\n ```python\n x = tf.constant([5, 4, 6])\n y = tf.constant([5])\n tf.math.less(x, y) ==> [False, True, False]\n\n x = tf.constant([5, 4, 6])\n y = tf.constant([5, 6, 7])\n tf.math.less(x, y) ==> [False, True, True]\n ```\n\n Args:\n x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.\n y: A `Tensor`. Must have the same type as `x`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `bool`.\n "}, {"name": "less_equal", "path": "./tf/math/less_equal.md", "desc": "Returns the truth value of (x <= y", "type": "Functions", "docs": "Returns the truth value of (x <= y) element-wise.\n\n *NOTE*: `math.less_equal` supports broadcasting. More about broadcasting\n [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\n Example:\n\n ```python\n x = tf.constant([5, 4, 6])\n y = tf.constant([5])\n tf.math.less_equal(x, y) ==> [True, True, False]\n\n x = tf.constant([5, 4, 6])\n y = tf.constant([5, 6, 6])\n tf.math.less_equal(x, y) ==> [True, True, True]\n ```\n\n Args:\n x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.\n y: A `Tensor`. Must have the same type as `x`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `bool`.\n "}, {"name": "linspace", "path": "./tf/linspace.md", "desc": "Generates evenly-spaced values in an interval along a given axis.", "type": "Functions", "docs": "Generates evenly-spaced values in an interval along a given axis.\n\n A sequence of `num` evenly-spaced values is generated beginning at `start`\n along a given `axis`.\n If `num > 1`, the values in the sequence increase by\n `(stop - start) / (num - 1)`, so that the last one is exactly `stop`.\n If `num <= 0`, `ValueError` is raised.\n\n Matches\n [np.linspace](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html)'s\n behaviour\n except when `num == 0`.\n\n For example:\n\n ```\n tf.linspace(10.0, 12.0, 3, name=\"linspace\") => [ 10.0 11.0 12.0]\n ```\n\n Both `start` and `stop` can be tensors of arbitrary size:\n\n >>> tf.linspace([0., 5.], [10., 40.], 5, axis=0)\n <tf.Tensor: shape=(5, 2), dtype=float32, numpy=\n array([[ 0.  ,  5.  ],\n [ 2.5 , 13.75],\n [ 5.  , 22.5 ],\n [ 7.5 , 31.25],\n [10.  , 40.  ]], dtype=float32)>\n\n The `axis` argument controls where the values will be generated (the dimension\n in the returned tensor which corresponds to the axis will be equal to `num`)\n\n >>> tf.linspace([0., 5.], [10., 40.], 5, axis=-1)\n <tf.Tensor: shape=(2, 5), dtype=float32, numpy=\n array([[ 0.  ,  2.5 ,  5.  ,  7.5 , 10.  ],\n [ 5.  , 13.75, 22.5 , 31.25, 40.  ]], dtype=float32)>\n\n\n\n Args:\n start: A `Tensor`. Must be one of the following types: `bfloat16`,\n `float32`, `float64`. N-D tensor. First entry in the range.\n stop: A `Tensor`. Must have the same type and shape as `start`. N-D tensor.\n Last entry in the range.\n num: A `Tensor`. Must be one of the following types: `int32`, `int64`. 0-D\n tensor. Number of values to generate.\n name: A name for the operation (optional).\n axis: Axis along which the operation is performed (used only when N-D\n tensors are provided).\n\n Returns:\n A `Tensor`. Has the same type as `start`.\n "}, {"name": "load_library", "path": "./tf/load_library.md", "desc": "Loads a TensorFlow plugin.", "type": "Functions", "docs": "Loads a TensorFlow plugin.\n\n \"library_location\" can be a path to a specific shared object, or a folder.\n If it is a folder, all shared objects that are named \"libtfkernel*\" will be\n loaded. When the library is loaded, kernels registered in the library via the\n `REGISTER_*` macros are made available in the TensorFlow process.\n\n Args:\n library_location: Path to the plugin or the folder of plugins.\n Relative or absolute filesystem path to a dynamic library file or folder.\n\n Returns:\n None\n\n Raises:\n OSError: When the file to be loaded is not found.\n RuntimeError: when unable to load the library.\n "}, {"name": "load_op_library", "path": "./tf/load_op_library.md", "desc": "Loads a TensorFlow plugin, containing custom ops and kernels.", "type": "Functions", "docs": "Loads a TensorFlow plugin, containing custom ops and kernels.\n\n Pass \"library_filename\" to a platform-specific mechanism for dynamically\n loading a library. The rules for determining the exact location of the\n library are platform-specific and are not documented here. When the\n library is loaded, ops and kernels registered in the library via the\n `REGISTER_*` macros are made available in the TensorFlow process. 
Note\n that ops with the same name as an existing op are rejected and not\n registered with the process.\n\n Args:\n library_filename: Path to the plugin.\n Relative or absolute filesystem path to a dynamic library file.\n\n Returns:\n A python module containing the Python wrappers for Ops defined in\n the plugin.\n\n Raises:\n RuntimeError: when unable to load the library or get the python wrappers.\n "}, {"name": "logical_and", "path": "./tf/math/logical_and.md", "desc": "Returns the truth value of x AND y element-wise.", "type": "Functions", "docs": "Returns the truth value of x AND y element-wise.\n\n Logical AND function.\n\n Requires that `x` and `y` have the same shape or have\n [broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n shapes. For example, `x` and `y` can be:\n\n - Two single elements of type `bool`.\n - One `tf.Tensor` of type `bool` and one single `bool`, where the result will\n be calculated by applying logical AND with the single element to each\n element in the larger Tensor.\n - Two `tf.Tensor` objects of type `bool` of the same shape. In this case,\n the result will be the element-wise logical AND of the two input tensors.\n\n You can also use the `&` operator instead.\n\n Usage:\n\n >>> a = tf.constant([True])\n >>> b = tf.constant([False])\n >>> tf.math.logical_and(a, b)\n <tf.Tensor: shape=(1,), dtype=bool, numpy=array([False])>\n >>> a & b\n <tf.Tensor: shape=(1,), dtype=bool, numpy=array([False])>\n\n >>> c = tf.constant([True])\n >>> x = tf.constant([False, True, True, False])\n >>> tf.math.logical_and(c, x)\n <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False,  True,  True, False])>\n >>> c & x\n <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False,  True,  True, False])>\n\n >>> y = tf.constant([False, False, True, True])\n >>> z = tf.constant([False, True, False, True])\n >>> tf.math.logical_and(y, z)\n <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False, False, False,  True])>\n >>> y & z\n <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False, False, False,  True])>\n\n This op also supports broadcasting:\n\n >>> tf.logical_and([[True, False]], [[True], [False]])\n <tf.Tensor: shape=(2, 2), dtype=bool, numpy=\n array([[ True, False],\n [False, False]])>\n\n The reduction version of this elementwise operation is `tf.math.reduce_all`.\n\n 
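For instance (our own one-liner, not from the original docs):\n\n >>> tf.math.reduce_all([True, True, False]).numpy()\n False\n\n 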
Args:\n x: A `tf.Tensor` of type bool.\n y: A `tf.Tensor` of type bool.\n name: A name for the operation (optional).\n\n Returns:\n A `tf.Tensor` of type bool with the shape that `x` and `y` broadcast to.\n "}, {"name": "logical_not", "path": "./tf/math/logical_not.md", "desc": "Returns the truth value of `NOT x` element-wise.", "type": "Functions", "docs": "Returns the truth value of `NOT x` element-wise.\n\n Example:\n\n >>> tf.math.logical_not(tf.constant([True, False]))\n <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False,  True])>\n\n Args:\n x: A `Tensor` of type `bool`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `bool`.\n "}, {"name": "logical_or", "path": "./tf/math/logical_or.md", "desc": "Returns the truth value of x OR y element-wise.", "type": "Functions", "docs": "Returns the truth value of x OR y element-wise.\n\n Logical OR function.\n\n Requires that `x` and `y` have the same shape or have\n [broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n shapes. For example, `x` and `y` can be:\n\n - Two single elements of type `bool`.\n - One `tf.Tensor` of type `bool` and one single `bool`, where the result will\n be calculated by applying logical OR with the single element to each\n element in the larger Tensor.\n - Two `tf.Tensor` objects of type `bool` of the same shape. In this case,\n the result will be the element-wise logical OR of the two input tensors.\n\n You can also use the `|` operator instead.\n\n Usage:\n\n >>> a = tf.constant([True])\n >>> b = tf.constant([False])\n >>> tf.math.logical_or(a, b)\n <tf.Tensor: shape=(1,), dtype=bool, numpy=array([ True])>\n >>> a | b\n <tf.Tensor: shape=(1,), dtype=bool, numpy=array([ True])>\n\n >>> c = tf.constant([False])\n >>> x = tf.constant([False, True, True, False])\n >>> tf.math.logical_or(c, x)\n <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False,  True,  True, False])>\n >>> c | x\n <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False,  True,  True, False])>\n\n >>> y = tf.constant([False, False, True, True])\n >>> z = tf.constant([False, True, False, True])\n >>> tf.math.logical_or(y, z)\n <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False,  True,  True,  True])>\n >>> y | z\n <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False,  True,  True,  True])>\n\n This op also supports broadcasting:\n\n >>> tf.logical_or([[True, False]], [[True], [False]])\n <tf.Tensor: shape=(2, 2), dtype=bool, numpy=\n array([[ True,  True],\n [ True, False]])>\n\n The reduction version of this elementwise operation is `tf.math.reduce_any`.\n\n Args:\n x: A `tf.Tensor` of type bool.\n y: A `tf.Tensor` of type bool.\n name: A name for the operation (optional).\n\n Returns:\n A `tf.Tensor` of type bool with the shape that `x` and `y` broadcast to.\n "}, {"name": "make_ndarray", "path": "./tf/make_ndarray.md", "desc": "Create a numpy ndarray from a tensor.", "type": "Functions", "docs": "Create a numpy ndarray from a tensor.\n\n Create a numpy ndarray with the same shape and data as the tensor.\n\n For example:\n\n ```python\n # Tensor a has shape (2,3)\n a = tf.constant([[1,2,3],[4,5,6]])\n proto_tensor = tf.make_tensor_proto(a) # convert `tensor a` to a proto tensor\n tf.make_ndarray(proto_tensor) # output: array([[1, 2, 3],\n # [4, 5, 6]], dtype=int32)\n # output has shape (2,3)\n ```\n\n Args:\n tensor: A TensorProto.\n\n Returns:\n A numpy array with the tensor contents.\n\n Raises:\n TypeError: if tensor has unsupported type.\n\n "}, {"name": "make_tensor_proto", "path": "./tf/make_tensor_proto.md", "desc": "Create a TensorProto.", "type": "Functions", "docs": "Create a TensorProto.\n\n In TensorFlow 2.0, representing tensors as protos should no longer be a\n common workflow. That said, this utility function is still useful for\n generating TF Serving request protos:\n\n ```python\n request = tensorflow_serving.apis.predict_pb2.PredictRequest()\n request.model_spec.name = \"my_model\"\n request.model_spec.signature_name = \"serving_default\"\n request.inputs[\"images\"].CopyFrom(tf.make_tensor_proto(X_new))\n ```\n\n `make_tensor_proto` accepts \"values\" of a python scalar, a python list, a\n numpy ndarray, or a numpy scalar.\n\n If \"values\" is a python scalar or a python list, make_tensor_proto\n first converts it to a numpy ndarray. If dtype is None, the\n conversion tries its best to infer the right numpy data\n type. 
Otherwise, the resulting numpy array has a data\n type compatible with the given dtype.\n\n In either case above, the numpy ndarray (either the caller provided\n or the auto-converted) must have a type compatible with dtype.\n\n `make_tensor_proto` then converts the numpy array to a tensor proto.\n\n If \"shape\" is None, the resulting tensor proto represents the numpy\n array precisely.\n\n Otherwise, \"shape\" specifies the tensor's shape and the numpy array\n cannot have more elements than what \"shape\" specifies.\n\n Args:\n values: Values to put in the TensorProto.\n dtype: Optional tensor_pb2 DataType value.\n shape: List of integers representing the dimensions of tensor.\n verify_shape: Boolean that enables verification of a shape of values.\n allow_broadcast: Boolean that enables allowing scalars and 1 length vector\n broadcasting. Cannot be true when verify_shape is true.\n\n Returns:\n A `TensorProto`. Depending on the type, it may contain data in the\n \"tensor_content\" attribute, which is not directly useful to Python programs.\n To access the values you should convert the proto back to a numpy ndarray\n with `tf.make_ndarray(proto)`.\n\n If `values` is a `TensorProto`, it is immediately returned; `dtype` and\n `shape` are ignored.\n\n Raises:\n TypeError: if unsupported types are provided.\n ValueError: if arguments have inappropriate values or if verify_shape is\n True and the shape of values does not match the shape argument.\n\n "}, {"name": "map_fn", "path": "./tf/map_fn.md", "desc": "Transforms `elems` by applying `fn` to each element unstacked on axis 0. (deprecated arguments", "type": "Functions", "docs": "Transforms `elems` by applying `fn` to each element unstacked on axis 0. (deprecated arguments)\n\nDeprecated: SOME ARGUMENTS ARE DEPRECATED: `(dtype)`. They will be removed in a future version.\nInstructions for updating:\nUse fn_output_signature instead\n\nSee also `tf.scan`.\n\n`map_fn` unstacks `elems` on axis 0 to obtain a sequence of elements;\ncalls `fn` to transform each element; and then stacks the transformed\nvalues back together.\n\n#### Mapping functions with single-Tensor inputs and outputs\n\nIf `elems` is a single tensor and `fn`'s signature is `tf.Tensor->tf.Tensor`,\nthen `map_fn(fn, elems)` is equivalent to\n`tf.stack([fn(elem) for elem in tf.unstack(elems)])`. E.g.:\n\n>>> tf.map_fn(fn=lambda t: tf.range(t, t + 3), elems=tf.constant([3, 5, 2]))\n<tf.Tensor: shape=(3, 3), dtype=int32, numpy=\narray([[3, 4, 5],\n [5, 6, 7],\n [2, 3, 4]], dtype=int32)>\n\n`map_fn(fn, elems).shape = [elems.shape[0]] + fn(elems[0]).shape`.\n\n#### Mapping functions with multi-arity inputs and outputs\n\n`map_fn` also supports functions with multi-arity inputs and outputs:\n\n* If `elems` is a tuple (or nested structure) of tensors, then those tensors\n must all have the same outer-dimension size (`num_elems`); and `fn` is\n used to transform each tuple (or structure) of corresponding slices from\n `elems`. E.g., if `elems` is a tuple `(t1, t2, t3)`, then `fn` is used to\n transform each tuple of slices `(t1[i], t2[i], t3[i])`\n (where `0 <= i < num_elems`).\n\n* If `fn` returns a tuple (or nested structure) of tensors, then the\n result is formed by stacking corresponding elements from those structures.\n\n#### Specifying `fn`'s output signature\n\nIf `fn`'s input and output signatures are different, then the output\nsignature must be specified using `fn_output_signature`. (The input and\noutput signatures are different if their structures, dtypes, or tensor types\ndo not match.) 
E.g.:\n\n>>> tf.map_fn(fn=tf.strings.length, # input & output have different dtypes\n... elems=tf.constant([\"hello\", \"moon\"]),\n... fn_output_signature=tf.int32)\n\n>>> tf.map_fn(fn=tf.strings.join, # input & output have different structures\n... elems=[tf.constant(['The', 'A']), tf.constant(['Dog', 'Cat'])],\n... fn_output_signature=tf.string)\n\n\n`fn_output_signature` can be specified using any of the following:\n\n* A `tf.DType` or `tf.TensorSpec` (to describe a `tf.Tensor`)\n* A `tf.RaggedTensorSpec` (to describe a `tf.RaggedTensor`)\n* A `tf.SparseTensorSpec` (to describe a `tf.sparse.SparseTensor`)\n* A (possibly nested) tuple, list, or dict containing the above types.\n\n#### RaggedTensors\n\n`map_fn` supports `tf.RaggedTensor` inputs and outputs. In particular:\n\n* If `elems` is a `RaggedTensor`, then `fn` will be called with each\n row of that ragged tensor.\n * If `elems` has only one ragged dimension, then the values passed to\n `fn` will be `tf.Tensor`s.\n * If `elems` has multiple ragged dimensions, then the values passed to\n `fn` will be `tf.RaggedTensor`s with one fewer ragged dimension.\n\n* If the result of `map_fn` should be a `RaggedTensor`, then use a\n `tf.RaggedTensorSpec` to specify `fn_output_signature`.\n * If `fn` returns `tf.Tensor`s with varying sizes, then use a\n `tf.RaggedTensorSpec` with `ragged_rank=0` to combine them into a\n single ragged tensor (which will have ragged_rank=1).\n * If `fn` returns `tf.RaggedTensor`s, then use a `tf.RaggedTensorSpec`\n with the same `ragged_rank`.\n\n>>> # Example: RaggedTensor input\n>>> rt = tf.ragged.constant([[1, 2, 3], [], [4, 5], [6]])\n>>> tf.map_fn(tf.reduce_sum, rt, fn_output_signature=tf.int32)\n\n\n>>> # Example: RaggedTensor output\n>>> elems = tf.constant([3, 5, 0, 2])\n>>> tf.map_fn(tf.range, elems,\n... fn_output_signature=tf.RaggedTensorSpec(shape=[None],\n... dtype=tf.int32))\n\n\nNote: `map_fn` should only be used if you need to map a function over the\n*rows* of a `RaggedTensor`. If you wish to map a function over the\nindividual values, then you should use:\n\n* `tf.ragged.map_flat_values(fn, rt)`\n (if fn is expressible as TensorFlow ops)\n* `rt.with_flat_values(map_fn(fn, rt.flat_values))`\n (otherwise)\n\nE.g.:\n\n>>> rt = tf.ragged.constant([[1, 2, 3], [], [4, 5], [6]])\n>>> tf.ragged.map_flat_values(lambda x: x + 2, rt)\n\n\n#### SparseTensors\n\n`map_fn` supports `tf.sparse.SparseTensor` inputs and outputs. In particular:\n\n* If `elems` is a `SparseTensor`, then `fn` will be called with each row\n of that sparse tensor. In particular, the value passed to `fn` will be a\n `tf.sparse.SparseTensor` with one fewer dimension than `elems`.\n\n* If the result of `map_fn` should be a `SparseTensor`, then use a\n `tf.SparseTensorSpec` to specify `fn_output_signature`. The individual\n `SparseTensor`s returned by `fn` will be stacked into a single\n `SparseTensor` with one more dimension.\n\n>>> # Example: SparseTensor input\n>>> st = tf.sparse.SparseTensor([[0, 0], [2, 0], [2, 1]], [2, 3, 4], [4, 4])\n>>> tf.map_fn(tf.sparse.reduce_sum, st, fn_output_signature=tf.int32)\n\n\n>>> # Example: SparseTensor output\n>>> tf.sparse.to_dense(\n... tf.map_fn(tf.sparse.eye, tf.constant([2, 3]),\n... fn_output_signature=tf.SparseTensorSpec(None, tf.float32)))\n\n\nNote: `map_fn` should only be used if you need to map a function over the\n*rows* of a `SparseTensor`. 
If you wish to map a function over the nonzero\nvalues, then you should use:\n\n* If the function is expressible as TensorFlow ops, use:\n ```python\n tf.sparse.SparseTensor(st.indices, fn(st.values), st.dense_shape)\n ```\n* Otherwise, use:\n ```python\n tf.sparse.SparseTensor(st.indices, tf.map_fn(fn, st.values),\n st.dense_shape)\n ```\n\n#### `map_fn` vs. vectorized operations\n\n`map_fn` will apply the operations used by `fn` to each element of `elems`,\nresulting in `O(elems.shape[0])` total operations. This is somewhat\nmitigated by the fact that `map_fn` can process elements in parallel.\nHowever, a transform expressed using `map_fn` is still typically less\nefficient than an equivalent transform expressed using vectorized operations.\n\n`map_fn` should typically only be used if one of the following is true:\n\n* It is difficult or expensive to express the desired transform with\n vectorized operations.\n* `fn` creates large intermediate values, so an equivalent vectorized\n transform would take too much memory.\n* Processing elements in parallel is more efficient than an equivalent\n vectorized transform.\n* Efficiency of the transform is not critical, and using `map_fn` is\n more readable.\n\nE.g., the example given above that maps `fn=lambda t: tf.range(t, t + 3)`\nacross `elems` could be rewritten more efficiently using vectorized ops:\n\n>>> elems = tf.constant([3, 5, 2])\n>>> tf.range(3) + tf.expand_dims(elems, 1)\n\n\nIn some cases, `tf.vectorized_map` can be used to automatically convert a\nfunction to a vectorized equivalent.\n\n#### Eager execution\n\nWhen executing eagerly, `map_fn` does not execute in parallel even if\n`parallel_iterations` is set to a value > 1. You can still get the\nperformance benefits of running a function in parallel by using the\n`tf.function` decorator:\n\n>>> fn=lambda t: tf.range(t, t + 3)\n>>> @tf.function\n... def func(elems):\n... return tf.map_fn(fn, elems, parallel_iterations=3)\n>>> func(tf.constant([3, 5, 2]))\n\n\n\nNote: if you use the `tf.function` decorator, any non-TensorFlow Python\ncode that you may have written in your function won't get executed. See\n`tf.function` for more details. The recommendation would be to debug without\n`tf.function` but switch to it to get performance benefits of running `map_fn`\nin parallel.\n\nArgs:\n fn: The callable to be performed. It accepts one argument, which will have\n the same (possibly nested) structure as `elems`. Its output must have the\n same structure as `fn_output_signature` if one is provided; otherwise it\n must have the same structure as `elems`.\n elems: A tensor or (possibly nested) sequence of tensors, each of which will\n be unstacked along their first dimension. `fn` will be applied to the\n nested sequence of the resulting slices. `elems` may include ragged and\n sparse tensors. `elems` must consist of at least one tensor.\n dtype: Deprecated: Equivalent to `fn_output_signature`.\n parallel_iterations: (optional) The number of iterations allowed to run in\n parallel. When graph building, the default value is 10. While executing\n eagerly, the default value is set to 1.\n back_prop: (optional) Deprecated: prefer using `tf.stop_gradient` instead. False disables support for back propagation.\n swap_memory: (optional) True enables GPU-CPU memory swapping.\n infer_shape: (optional) False disables tests for consistent output shapes.\n name: (optional) Name prefix for the returned tensors.\n fn_output_signature: The output signature of `fn`. 
Must be specified if\n `fn`'s input and output signatures are different (i.e., if their\n structures, dtypes, or tensor types do not match).\n `fn_output_signature` can be specified using any of the following:\n\n * A `tf.DType` or `tf.TensorSpec` (to describe a `tf.Tensor`)\n * A `tf.RaggedTensorSpec` (to describe a `tf.RaggedTensor`)\n * A `tf.SparseTensorSpec` (to describe a `tf.sparse.SparseTensor`)\n * A (possibly nested) tuple, list, or dict containing the above types.\n\nReturns:\n A tensor or (possibly nested) sequence of tensors. Each tensor stacks the\n results of applying `fn` to tensors unstacked from `elems` along the first\n dimension, from first to last. The result may include ragged and sparse\n tensors.\n\nRaises:\n TypeError: if `fn` is not callable or the structure of the output of\n `fn` and `fn_output_signature` do not match.\n ValueError: if the lengths of the output of `fn` and `fn_output_signature`\n do not match, or if `elems` does not contain any tensor.\n\nExamples:\n\n >>> elems = np.array([1, 2, 3, 4, 5, 6])\n >>> tf.map_fn(lambda x: x * x, elems)\n \n\n >>> elems = (np.array([1, 2, 3]), np.array([-1, 1, -1]))\n >>> tf.map_fn(lambda x: x[0] * x[1], elems, fn_output_signature=tf.int64)\n \n\n >>> elems = np.array([1, 2, 3])\n >>> tf.map_fn(lambda x: (x, -x), elems,\n ... fn_output_signature=(tf.int64, tf.int64))"}, {"name": "matmul", "path": "./tf/linalg/matmul.md", "desc": "Multiplies matrix `a` by matrix `b`, producing `a` * `b`.", "type": "Functions", "docs": "Multiplies matrix `a` by matrix `b`, producing `a` * `b`.\n\n The inputs must, following any transpositions, be tensors of rank >= 2\n where the inner 2 dimensions specify valid matrix multiplication dimensions,\n and any further outer dimensions specify matching batch size.\n\n Both matrices must be of the same type. The supported types are:\n `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`,\n `complex64`, `complex128`.\n\n Either matrix can be transposed or adjointed (conjugated and transposed) on\n the fly by setting one of the corresponding flags to `True`. These are `False`\n by default.\n\n If one or both of the matrices contain a lot of zeros, a more efficient\n multiplication algorithm can be used by setting the corresponding\n `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.\n This optimization is only available for plain matrices (rank-2 tensors) with\n datatypes `bfloat16` or `float32`.\n\n A simple 2-D tensor matrix multiplication:\n\n >>> a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])\n >>> a # 2-D tensor\n \n >>> b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])\n >>> b # 2-D tensor\n \n >>> c = tf.matmul(a, b)\n >>> c # `a` * `b`\n \n\n A batch matrix multiplication with batch shape [2]:\n\n >>> a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3])\n >>> a # 3-D tensor\n \n >>> b = tf.constant(np.arange(13, 25, dtype=np.int32), shape=[2, 3, 2])\n >>> b # 3-D tensor\n \n >>> c = tf.matmul(a, b)\n >>> c # `a` * `b`\n \n\n Since Python >= 3.5, the @ operator is supported\n (see [PEP 465](https://www.python.org/dev/peps/pep-0465/)).
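As a hedged aside (a sketch, not upstream documentation), the `@` operator and a direct `tf.matmul` call can be compared directly:

```python
import tensorflow as tf

# Sketch: on tensors of rank >= 2, a @ b is shorthand for tf.matmul(a, b).
a = tf.constant([[1., 2.], [3., 4.]])
b = tf.constant([[5., 6.], [7., 8.]])
print(tf.reduce_all(a @ b == tf.matmul(a, b)).numpy())  # True
```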
In TensorFlow,\n it simply calls the `tf.matmul()` function, so the following lines are\n equivalent:\n\n >>> d = a @ b @ [[10], [11]]\n >>> d = tf.matmul(tf.matmul(a, b), [[10], [11]])\n\n Args:\n a: `tf.Tensor` of type `float16`, `float32`, `float64`, `int32`,\n `complex64`, `complex128` and rank > 1.\n b: `tf.Tensor` with same type and rank as `a`.\n transpose_a: If `True`, `a` is transposed before multiplication.\n transpose_b: If `True`, `b` is transposed before multiplication.\n adjoint_a: If `True`, `a` is conjugated and transposed before\n multiplication.\n adjoint_b: If `True`, `b` is conjugated and transposed before\n multiplication.\n a_is_sparse: If `True`, `a` is treated as a sparse matrix. Notice that this\n **does not support `tf.sparse.SparseTensor`**; it just makes optimizations\n that assume most values in `a` are zero.\n See `tf.sparse.sparse_dense_matmul`\n for some support for `tf.sparse.SparseTensor` multiplication.\n b_is_sparse: If `True`, `b` is treated as a sparse matrix. Notice that this\n **does not support `tf.sparse.SparseTensor`**; it just makes optimizations\n that assume most values in `b` are zero.\n See `tf.sparse.sparse_dense_matmul`\n for some support for `tf.sparse.SparseTensor` multiplication.\n output_type: The output datatype if needed. Defaults to None in which case\n the output_type is the same as input type. Currently only works when input\n tensors are type (u)int8 and output_type can be int32.\n name: Name for the operation (optional).\n\n Returns:\n A `tf.Tensor` of the same type as `a` and `b` where each inner-most matrix\n is the product of the corresponding matrices in `a` and `b`, e.g. if all\n transpose or adjoint attributes are `False`:\n\n `output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j])`,\n for all indices `i`, `j`.\n\n Note: This is matrix product, not element-wise product.\n\n\n Raises:\n ValueError: If `transpose_a` and `adjoint_a`, or `transpose_b` and\n `adjoint_b` are both set to `True`.\n TypeError: If output_type is specified but the types of `a`, `b` and\n `output_type` are not (u)int8, (u)int8 and int32.\n "}, {"name": "matrix_square_root", "path": "./tf/linalg/sqrtm.md", "desc": "Computes the matrix square root of one or more square matrices:", "type": "Functions", "docs": "Computes the matrix square root of one or more square matrices:\n\n matmul(sqrtm(A), sqrtm(A)) = A\n\n The input matrix should be invertible. If the input matrix is real, it should\n have no eigenvalues which are real and negative (pairs of complex conjugate\n eigenvalues are allowed).\n\n The matrix square root is computed by first reducing the matrix to\n quasi-triangular form with the real Schur decomposition. The square root\n of the quasi-triangular matrix is then computed directly. Details of\n the algorithm can be found in: Nicholas J. Higham, \"Computing real\n square roots of a real matrix\", Linear Algebra Appl., 1987.\n\n The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\n form square matrices. The output is a tensor of the same shape as the input\n containing the matrix square root for all input submatrices `[..., :, :]`.\n\n Args:\n input: A `Tensor`. Must be one of the following types: `float64`, `float32`, `half`, `complex64`, `complex128`.\n Shape is `[..., M, M]`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n "}, {"name": "maximum", "path": "./tf/math/maximum.md", "desc": "Returns the max of x and y (i.e. x > y ? x : y) element-wise.
x y", "type": "Functions", "docs": "Returns the max of x and y (i.e. x > y ? x : y) element-wise.\n\n Example:\n\n >>> x = tf.constant([0., 0., 0., 0.])\n >>> y = tf.constant([-2., 0., 2., 5.])\n >>> tf.math.maximum(x, y)\n \n\n Note that `maximum` supports [broadcast semantics](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) for `x` and `y`.\n\n >>> x = tf.constant([-5., 0., 0., 0.])\n >>> y = tf.constant([-3.])\n >>> tf.math.maximum(x, y)\n \n\n The reduction version of this elementwise operation is `tf.math.reduce_max`\n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `uint32`, `int64`, `uint64`.\n y: A `Tensor`. Must have the same type as `x`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n "}, {"name": "meshgrid", "path": "./tf/meshgrid.md", "desc": "Broadcasts parameters for evaluation on an N-D grid.", "type": "Functions", "docs": "Broadcasts parameters for evaluation on an N-D grid.\n\n Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`\n of N-D coordinate arrays for evaluating expressions on an N-D grid.\n\n Notes:\n\n `meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.\n When the `indexing` argument is set to 'xy' (the default), the broadcasting\n instructions for the first two dimensions are swapped.\n\n Examples:\n\n Calling `X, Y = meshgrid(x, y)` with the tensors\n\n ```python\n x = [1, 2, 3]\n y = [4, 5, 6]\n X, Y = tf.meshgrid(x, y)\n # X = [[1, 2, 3],\n # [1, 2, 3],\n # [1, 2, 3]]\n # Y = [[4, 4, 4],\n # [5, 5, 5],\n # [6, 6, 6]]\n ```\n\n Args:\n *args: `Tensor`s with rank 1.\n **kwargs:\n - indexing: Either 'xy' or 'ij' (optional, default: 'xy').\n - name: A name for the operation (optional).\n\n Returns:\n outputs: A list of N `Tensor`s with rank N.\n\n Raises:\n TypeError: When no keyword arguments (kwargs) are passed.\n ValueError: When indexing keyword argument is not one of `xy` or `ij`.\n "}, {"name": "minimum", "path": "./tf/math/minimum.md", "desc": "Returns the min of x and y (i.e. x < y ? x y", "type": "Functions", "docs": "Returns the min of x and y (i.e. x < y ? x : y) element-wise.\n\n Both inputs are number-type tensors (except complex). `minimum` expects that\n both tensors have the same `dtype`.\n\n Examples:\n\n >>> x = tf.constant([0., 0., 0., 0.])\n >>> y = tf.constant([-5., -2., 0., 3.])\n >>> tf.math.minimum(x, y)\n \n\n Note that `minimum` supports [broadcast semantics](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) for `x` and `y`.\n\n >>> x = tf.constant([-5., 0., 0., 0.])\n >>> y = tf.constant([-3.])\n >>> tf.math.minimum(x, y)\n \n\n The reduction version of this elementwise operation is `tf.math.reduce_min`\n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `uint32`, `int64`, `uint64`.\n y: A `Tensor`. Must have the same type as `x`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `x`.\n "}, {"name": "multiply", "path": "./tf/math/multiply.md", "desc": "Returns an element-wise x * y.", "type": "Functions", "docs": "Returns an element-wise x * y.\n\n For example:\n\n >>> x = tf.constant(([1, 2, 3, 4]))\n >>> tf.math.multiply(x, x)\n \n\n Since `tf.math.multiply` will convert its arguments to `Tensor`s, you can also\n pass in non-`Tensor` arguments:\n\n >>> tf.math.multiply(7, 6)\n \n\n If `x.shape` is not the same as `y.shape`, they will be broadcast to a\n compatible shape. (More about broadcasting\n [here](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).)\n\n For example:\n\n >>> x = tf.ones([1, 2])\n >>> y = tf.ones([2, 1])\n >>> x * y # Taking advantage of operator overloading\n \n\n The reduction version of this elementwise operation is `tf.math.reduce_prod`.\n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`,\n `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`,\n `int16`, `int32`, `int64`, `complex64`, `complex128`.\n y: A `Tensor`. Must have the same type as `x`.\n name: A name for the operation (optional).\n\n Returns:\n\n A `Tensor`. Has the same type as `x`.\n\n Raises:\n\n * InvalidArgumentError: When `x` and `y` have incompatible shapes or types.\n "}, {"name": "negative", "path": "./tf/math/negative.md", "desc": "Computes numerical negative value element-wise.", "type": "Functions", "docs": "Computes numerical negative value element-wise.\n\n I.e., \\\\(y = -x\\\\).\n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n\n If `x` is a `SparseTensor`, returns\n `SparseTensor(x.indices, tf.math.negative(x.values, ...), x.dense_shape)`"}, {"name": "no_gradient", "path": "./tf/no_gradient.md", "desc": "Specifies that ops of type `op_type` are not differentiable.", "type": "Functions", "docs": "Specifies that ops of type `op_type` are not differentiable.\n\n This function should *not* be used for operations that have a\n well-defined gradient that is not yet implemented.\n\n This function is only used when defining a new op type. It may be\n used for ops such as `tf.size()` that are not differentiable. For\n example:\n\n ```python\n tf.no_gradient(\"Size\")\n ```\n\n The gradient computed for 'op_type' will then propagate zeros.\n\n For ops that have a well-defined gradient but are not yet implemented,\n no declaration should be made, and an error *must* be thrown if\n an attempt to request its gradient is made.\n\n Args:\n op_type: The string type of an operation. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n\n Raises:\n TypeError: If `op_type` is not a string.\n\n "}, {"name": "no_op", "path": "./tf/no_op.md", "desc": "Does nothing. Only useful as a placeholder for control edges.", "type": "Functions", "docs": "Does nothing.
Only useful as a placeholder for control edges.\n\n Args:\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n "}, {"name": "nondifferentiable_batch_function", "path": "./tf/nondifferentiable_batch_function.md", "desc": "Batches the computation done by the decorated function.", "type": "Functions", "docs": "Batches the computation done by the decorated function.\n\n So, for example, in the following code\n\n ```python\n @batch_function(1, 2, 3)\n def layer(a):\n return tf.matmul(a, a)\n\n b = layer(w)\n ```\n\n if more than one session.run call is simultaneously trying to compute `b`,\n the values of `w` will be gathered, non-deterministically concatenated\n along the first axis, and only one thread will run the computation. See the\n documentation of the `Batch` op for more details.\n\n Assumes that all arguments of the decorated function are Tensors which will\n be batched along their first dimension.\n\n SparseTensor is not supported. The return value of the decorated function\n must be a Tensor or a list/tuple of Tensors.\n\n Args:\n num_batch_threads: Number of scheduling threads for processing batches\n of work. Determines the number of batches processed in parallel.\n max_batch_size: Batch sizes will never be bigger than this.\n batch_timeout_micros: Maximum number of microseconds to wait before\n outputting an incomplete batch.\n allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,\n does nothing. Otherwise, supplies a list of batch sizes, causing the op\n to pad batches up to one of those sizes. The entries must increase\n monotonically, and the final entry must equal max_batch_size.\n max_enqueued_batches: The maximum depth of the batch queue. Defaults to 10.\n autograph: Whether to use autograph to compile python and eager style code\n for efficient graph-mode execution.\n enable_large_batch_splitting: The value of this option doesn't affect\n processing output given the same input; it affects implementation details\n as stated below: 1. Improve batching efficiency by eliminating unnecessary\n adding. 2. `max_batch_size` specifies the limit of input and\n `allowed_batch_sizes` specifies the limit of a task to be processed. API\n user can give an input of size 128 when 'max_execution_batch_size'\n is 32 -> implementation can split input of 128 into 4 x 32, schedule\n concurrent processing, and then return concatenated results corresponding\n to 128.\n\n Returns:\n The decorated function will return the unbatched computation output Tensors.\n "}, {"name": "norm", "path": "./tf/norm.md", "desc": "Computes the norm of vectors, matrices, and tensors.", "type": "Functions", "docs": "Computes the norm of vectors, matrices, and tensors.\n\n This function can compute several different vector norms (the 1-norm, the\n Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0) and\n matrix norms (Frobenius, 1-norm, 2-norm and inf-norm).\n\n Args:\n tensor: `Tensor` of types `float32`, `float64`, `complex64`, `complex128`.\n ord: Order of the norm. Supported values are `'fro'`, `'euclidean'`,\n `1`, `2`, `np.inf` and any positive real number yielding the corresponding\n p-norm.
Default is `'euclidean'` which is equivalent to Frobenius norm if\n `tensor` is a matrix and equivalent to 2-norm for vectors.\n Some restrictions apply:\n a) The Frobenius norm `'fro'` is not defined for vectors,\n b) If axis is a 2-tuple (matrix norm), only `'euclidean'`, `'fro'`, `1`,\n `2`, `np.inf` are supported.\n See the description of `axis` on how to compute norms for a batch of\n vectors or matrices stored in a tensor.\n axis: If `axis` is `None` (the default), the input is considered a vector\n and a single vector norm is computed over the entire set of values in the\n tensor, i.e. `norm(tensor, ord=ord)` is equivalent to\n `norm(reshape(tensor, [-1]), ord=ord)`.\n If `axis` is a Python integer, the input is considered a batch of vectors,\n and `axis` determines the axis in `tensor` over which to compute vector\n norms.\n If `axis` is a 2-tuple of Python integers it is considered a batch of\n matrices and `axis` determines the axes in `tensor` over which to compute\n a matrix norm.\n Negative indices are supported. Example: If you are passing a tensor that\n can be either a matrix or a batch of matrices at runtime, pass\n `axis=[-2,-1]` instead of `axis=None` to make sure that matrix norms are\n computed.\n keepdims: If True, the axes indicated in `axis` are kept with size 1.\n Otherwise, the dimensions in `axis` are removed from the output shape.\n name: The name of the op.\n\n Returns:\n output: A `Tensor` of the same type as tensor, containing the vector or\n matrix norms. If `keepdims` is True then the rank of output is equal to\n the rank of `tensor`. Otherwise, if `axis` is `None` the output is a scalar,\n if `axis` is an integer, the rank of `output` is one less than the rank\n of `tensor`, if `axis` is a 2-tuple the rank of `output` is two less\n than the rank of `tensor`.\n\n Raises:\n ValueError: If `ord` or `axis` is invalid.\n\n @compatibility(numpy)\n Mostly equivalent to numpy.linalg.norm.\n Not supported: ord <= 0, 2-norm for matrices, nuclear norm.\n Other differences:\n a) If axis is `None`, treats the flattened `tensor` as a vector\n regardless of rank.\n b) Explicitly supports 'euclidean' norm as the default, including for\n higher order tensors.\n @end_compatibility\n "}, {"name": "not_equal", "path": "./tf/math/not_equal.md", "desc": "Returns the truth value of (x != y) element-wise.", "type": "Functions", "docs": "Returns the truth value of (x != y) element-wise.\n\n Performs a [broadcast](\n https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the\n arguments and then an element-wise inequality comparison, returning a Tensor\n of boolean values.\n\n For example:\n\n >>> x = tf.constant([2, 4])\n >>> y = tf.constant(2)\n >>> tf.math.not_equal(x, y)\n \n\n >>> x = tf.constant([2, 4])\n >>> y = tf.constant([2, 4])\n >>> tf.math.not_equal(x, y)\n \n\n Args:\n x: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.\n y: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.\n name: A name for the operation (optional).\n\n Returns:\n A `tf.Tensor` of type bool with the same size as that of x or y.\n\n Raises:\n `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible\n "}, {"name": "numpy_function", "path": "./tf/numpy_function.md", "desc": "Wraps a python function and uses it as a TensorFlow op.", "type": "Functions", "docs": "Wraps a python function and uses it as a TensorFlow op.\n\n Given a python function `func`, wrap this function as an operation in a\n TensorFlow function.
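Before the requirements spelled out next, a hedged sketch (the `interp_np` helper is hypothetical, and `np.interp` is chosen only because it has no single-op TensorFlow equivalent): `func` consumes and returns NumPy arrays.

```python
import numpy as np
import tensorflow as tf

# Sketch: wrap np.interp; inputs arrive as NumPy arrays and the return
# value is a NumPy array whose dtype must match Tout (float32 here).
def interp_np(x, xp, fp):
    return np.interp(x, xp, fp).astype(np.float32)

x = tf.constant([0.5, 1.5])
xp = tf.constant([0.0, 1.0, 2.0])
fp = tf.constant([0.0, 10.0, 20.0])
print(tf.numpy_function(interp_np, [x, xp, fp], tf.float32))
# tf.Tensor([ 5. 15.], shape=(2,), dtype=float32)
```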
`func` must take numpy arrays as its arguments and\n return numpy arrays as its outputs.\n\n The following example creates a TensorFlow graph with `np.sinh()` as an\n operation in the graph:\n\n >>> def my_numpy_func(x):\n ... # x will be a numpy array with the contents of the input to the\n ... # tf.function\n ... return np.sinh(x)\n >>> @tf.function(input_signature=[tf.TensorSpec(None, tf.float32)])\n ... def tf_function(input):\n ... y = tf.numpy_function(my_numpy_func, [input], tf.float32)\n ... return y * y\n >>> tf_function(tf.constant(1.))\n \n\n Comparison to `tf.py_function`:\n `tf.py_function` and `tf.numpy_function` are very similar, except that\n `tf.numpy_function` takes numpy arrays, and not `tf.Tensor`s. If you want the\n function to contain `tf.Tensor`s, and have any TensorFlow operations executed\n in the function be differentiable, please use `tf.py_function`.\n\n Note: We recommend against using `tf.numpy_function` outside of\n prototyping and experimentation due to the following known limitations:\n\n * Calling `tf.numpy_function` will acquire the Python Global Interpreter Lock\n (GIL) that allows only one thread to run at any point in time. This will\n preclude efficient parallelization and distribution of the execution of the\n program. Therefore, you are discouraged from using `tf.numpy_function` outside\n of prototyping and experimentation.\n\n * The body of the function (i.e. `func`) will not be serialized in a\n `tf.SavedModel`. Therefore, you should not use this function if you need to\n serialize your model and restore it in a different environment.\n\n * The operation must run in the same address space as the Python program\n that calls `tf.numpy_function()`. If you are using distributed\n TensorFlow, you must run a `tf.distribute.Server` in the same process as the\n program that calls `tf.numpy_function`, and you must pin the created\n operation to a device in that server (e.g. using `with tf.device():`).\n\n * Currently `tf.numpy_function` is not compatible with XLA. Calling\n `tf.numpy_function` inside `tf.function(jit_compile=True)` will raise an\n error.\n\n * Since the function takes numpy arrays, you cannot take gradients\n through a numpy_function. If you require something that is differentiable,\n please consider using `tf.py_function`.\n\n Args:\n func: A Python function, which accepts `numpy.ndarray` objects as arguments\n and returns a list of `numpy.ndarray` objects (or a single\n `numpy.ndarray`). This function must accept as many arguments as there are\n tensors in `inp`, and these argument types will match the corresponding\n `tf.Tensor` objects in `inp`. The returned `numpy.ndarray`s must match the\n number and types defined in `Tout`.\n Important Note: Input and output `numpy.ndarray`s of `func` are not\n guaranteed to be copies. In some cases their underlying memory will be\n shared with the corresponding TensorFlow tensors. In-place modification\n or storing `func` input or return values in Python data structures\n without explicit (np.)copy can have non-deterministic consequences.\n inp: A list of `tf.Tensor` objects.\n Tout: A list or tuple of tensorflow data types or a single tensorflow data\n type if there is only one, indicating what `func` returns.\n stateful: (Boolean.)
Setting this argument to False tells the runtime to\n treat the function as stateless, which enables certain optimizations.\n A function is stateless when given the same input it will return the\n same output and have no side effects; its only purpose is to have a\n return value.\n The behavior for a stateful function with the `stateful` argument False\n is undefined. In particular, caution should be taken when\n mutating the input arguments as this is a stateful operation.\n name: (Optional) A name for the operation.\n\n Returns:\n Single or list of `tf.Tensor` which `func` computes.\n "}, {"name": "one_hot", "path": "./tf/one_hot.md", "desc": "Returns a one-hot tensor.", "type": "Functions", "docs": "Returns a one-hot tensor.\n\n See also `tf.fill`, `tf.eye`.\n\n The locations represented by indices in `indices` take value `on_value`,\n while all other locations take value `off_value`.\n\n `on_value` and `off_value` must have matching data types. If `dtype` is also\n provided, they must be the same data type as specified by `dtype`.\n\n If `on_value` is not provided, it will default to the value `1` with type\n `dtype`\n\n If `off_value` is not provided, it will default to the value `0` with type\n `dtype`\n\n If the input `indices` is rank `N`, the output will have rank `N+1`. The\n new axis is created at dimension `axis` (default: the new axis is appended\n at the end).\n\n If `indices` is a scalar the output shape will be a vector of length `depth`\n\n If `indices` is a vector of length `features`, the output shape will be:\n\n ```\n features x depth if axis == -1\n depth x features if axis == 0\n ```\n\n If `indices` is a matrix (batch) with shape `[batch, features]`, the output\n shape will be:\n\n ```\n batch x features x depth if axis == -1\n batch x depth x features if axis == 1\n depth x batch x features if axis == 0\n ```\n\n If `indices` is a RaggedTensor, the 'axis' argument must be positive and refer\n to a non-ragged axis. The output will be equivalent to applying 'one_hot' on\n the values of the RaggedTensor, and creating a new RaggedTensor from the\n result.\n\n If `dtype` is not provided, it will attempt to assume the data type of\n `on_value` or `off_value`, if one or both are passed in. 
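A hedged sketch of the inference rules described here (illustrative only; the `float32` fallback shown in the first line is stated formally just below):

```python
import tensorflow as tf

# Sketch: dtype falls back to float32 when nothing is given, and is
# inferred from on_value/off_value when they are provided.
print(tf.one_hot([0, 1], depth=2).dtype)                           # float32
print(tf.one_hot([0, 1], depth=2, on_value=1, off_value=0).dtype)  # int32
```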
If none of\n `on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the\n value `tf.float32`.\n\n Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,\n etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.\n\n For example:\n\n ```python\n indices = [0, 1, 2]\n depth = 3\n tf.one_hot(indices, depth) # output: [3 x 3]\n # [[1., 0., 0.],\n # [0., 1., 0.],\n # [0., 0., 1.]]\n\n indices = [0, 2, -1, 1]\n depth = 3\n tf.one_hot(indices, depth,\n on_value=5.0, off_value=0.0,\n axis=-1) # output: [4 x 3]\n # [[5.0, 0.0, 0.0], # one_hot(0)\n # [0.0, 0.0, 5.0], # one_hot(2)\n # [0.0, 0.0, 0.0], # one_hot(-1)\n # [0.0, 5.0, 0.0]] # one_hot(1)\n\n indices = [[0, 2], [1, -1]]\n depth = 3\n tf.one_hot(indices, depth,\n on_value=1.0, off_value=0.0,\n axis=-1) # output: [2 x 2 x 3]\n # [[[1.0, 0.0, 0.0], # one_hot(0)\n # [0.0, 0.0, 1.0]], # one_hot(2)\n # [[0.0, 1.0, 0.0], # one_hot(1)\n # [0.0, 0.0, 0.0]]] # one_hot(-1)\n\n indices = tf.ragged.constant([[0, 1], [2]])\n depth = 3\n tf.one_hot(indices, depth) # output: [2 x None x 3]\n # [[[1., 0., 0.],\n # [0., 1., 0.]],\n # [[0., 0., 1.]]]\n ```\n\n Args:\n indices: A `Tensor` of indices.\n depth: A scalar defining the depth of the one hot dimension.\n on_value: A scalar defining the value to fill in output when `indices[j]\n = i`. (default: 1)\n off_value: A scalar defining the value to fill in output when `indices[j]\n != i`. (default: 0)\n axis: The axis to fill (default: -1, a new inner-most axis).\n dtype: The data type of the output tensor.\n name: A name for the operation (optional).\n\n Returns:\n output: The one-hot tensor.\n\n Raises:\n TypeError: If dtype of either `on_value` or `off_value` doesn't match `dtype`\n TypeError: If dtypes of `on_value` and `off_value` don't match one another\n "}, {"name": "ones", "path": "./tf/ones.md", "desc": "Creates a tensor with all elements set to one (1).", "type": "Functions", "docs": "Creates a tensor with all elements set to one (1).\n\n See also `tf.ones_like`, `tf.zeros`, `tf.fill`, `tf.eye`.\n\n This operation returns a tensor of type `dtype` with shape `shape` and\n all elements set to one.\n\n >>> tf.ones([3, 4], tf.int32)\n \n\n Args:\n shape: A `list` of integers, a `tuple` of integers, or\n a 1-D `Tensor` of type `int32`.\n dtype: Optional DType of an element in the resulting `Tensor`. Default is\n `tf.float32`.\n name: Optional string. A name for the operation.\n\n Returns:\n A `Tensor` with all elements set to one (1).\n "}, {"name": "ones_like", "path": "./tf/ones_like.md", "desc": "Creates a tensor of all ones that has the same shape as the input.", "type": "Functions", "docs": "Creates a tensor of all ones that has the same shape as the input.\n\n See also `tf.ones`.\n\n Given a single tensor (`tensor`), this operation returns a tensor of the\n same type and shape as `tensor` with all elements set to 1. Optionally,\n you can use `dtype` to specify a new type for the returned tensor.\n\n For example:\n\n >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])\n >>> tf.ones_like(tensor)\n \n\n Args:\n input: A `Tensor`.\n dtype: A type for the returned `Tensor`.
Must be `float16`, `float32`,\n `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,\n `complex64`, `complex128`, `bool` or `string`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to one.\n "}, {"name": "pad", "path": "./tf/pad.md", "desc": "Pads a tensor.", "type": "Functions", "docs": "Pads a tensor.\n\n This operation pads a `tensor` according to the `paddings` you specify.\n `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of\n `tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how\n many values to add before the contents of `tensor` in that dimension, and\n `paddings[D, 1]` indicates how many values to add after the contents of\n `tensor` in that dimension. If `mode` is \"REFLECT\" then both `paddings[D, 0]`\n and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If\n `mode` is \"SYMMETRIC\" then both `paddings[D, 0]` and `paddings[D, 1]` must be\n no greater than `tensor.dim_size(D)`.\n\n The padded size of each dimension D of the output is:\n\n `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`\n\n For example:\n\n ```python\n t = tf.constant([[1, 2, 3], [4, 5, 6]])\n paddings = tf.constant([[1, 1,], [2, 2]])\n # 'constant_values' is 0.\n # rank of 't' is 2.\n tf.pad(t, paddings, \"CONSTANT\") # [[0, 0, 0, 0, 0, 0, 0],\n # [0, 0, 1, 2, 3, 0, 0],\n # [0, 0, 4, 5, 6, 0, 0],\n # [0, 0, 0, 0, 0, 0, 0]]\n\n tf.pad(t, paddings, \"REFLECT\") # [[6, 5, 4, 5, 6, 5, 4],\n # [3, 2, 1, 2, 3, 2, 1],\n # [6, 5, 4, 5, 6, 5, 4],\n # [3, 2, 1, 2, 3, 2, 1]]\n\n tf.pad(t, paddings, \"SYMMETRIC\") # [[2, 1, 1, 2, 3, 3, 2],\n # [2, 1, 1, 2, 3, 3, 2],\n # [5, 4, 4, 5, 6, 6, 5],\n # [5, 4, 4, 5, 6, 6, 5]]\n ```\n\n Args:\n tensor: A `Tensor`.\n paddings: A `Tensor` of type `int32`.\n mode: One of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\" (case-insensitive).\n constant_values: In \"CONSTANT\" mode, the scalar pad value to use. Must be\n same type as `tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `tensor`.\n\n Raises:\n ValueError: When mode is not one of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\".\n "}, {"name": "parallel_stack", "path": "./tf/parallel_stack.md", "desc": "Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.", "type": "Functions", "docs": "Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.\n\n Requires that the shape of inputs be known at graph construction time.\n\n Packs the list of tensors in `values` into a tensor with rank one higher than\n each tensor in `values`, by packing them along the first dimension.\n Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`\n tensor will have the shape `(N, A, B, C)`.\n\n For example:\n\n ```python\n x = tf.constant([1, 4])\n y = tf.constant([2, 5])\n z = tf.constant([3, 6])\n tf.parallel_stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]]\n ```\n\n The difference between `stack` and `parallel_stack` is that `stack` requires\n all the inputs be computed before the operation will begin, but doesn't require\n that the input shapes be known during graph construction.\n\n `parallel_stack` will copy pieces of the input into the output as they become\n available; in some situations this can provide a performance benefit.\n\n Unlike `stack`, `parallel_stack` does NOT support backpropagation.\n\n This is the opposite of unstack.
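A hedged sketch (assuming graph mode via `tf.function`, since, as the compatibility note below states, `parallel_stack` is not compatible with eager execution):

```python
import tensorflow as tf

# Sketch: tf.parallel_stack must run in graph mode, so wrap it in
# tf.function; the input shapes are statically known, as the op requires.
@tf.function
def stacked():
    x = tf.constant([1, 4])
    y = tf.constant([2, 5])
    z = tf.constant([3, 6])
    return tf.parallel_stack([x, y, z])

print(stacked().numpy())  # [[1 4] [2 5] [3 6]]
```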
The numpy equivalent is\n\n tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])\n\n @compatibility(eager)\n parallel_stack is not compatible with eager execution.\n @end_compatibility\n\n Args:\n values: A list of `Tensor` objects with the same shape and type.\n name: A name for this operation (optional).\n\n Returns:\n output: A stacked `Tensor` with the same type as `values`.\n\n Raises:\n RuntimeError: if executed in eager mode.\n "}, {"name": "pow", "path": "./tf/math/pow.md", "desc": "Computes the power of one value to another.", "type": "Functions", "docs": "Computes the power of one value to another.\n\n Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for\n corresponding elements in `x` and `y`. For example:\n\n ```python\n x = tf.constant([[2, 2], [3, 3]])\n y = tf.constant([[8, 16], [2, 3]])\n tf.pow(x, y) # [[256, 65536], [9, 27]]\n ```\n\n Args:\n x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n `complex64`, or `complex128`.\n y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n `complex64`, or `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`.\n "}, {"name": "print", "path": "./tf/print.md", "desc": "Print the specified inputs.", "type": "Functions", "docs": "Print the specified inputs.\n\n A TensorFlow operator that prints the specified inputs to a desired\n output stream or logging level. The inputs may be dense or sparse Tensors,\n primitive python objects, data structures that contain tensors, and printable\n Python objects. Printed tensors will recursively show the first and last\n elements of each dimension to summarize.\n\n Example:\n Single-input usage:\n\n ```python\n tensor = tf.range(10)\n tf.print(tensor, output_stream=sys.stderr)\n ```\n\n (This prints \"[0 1 2 ... 7 8 9]\" to sys.stderr)\n\n Multi-input usage:\n\n ```python\n tensor = tf.range(10)\n tf.print(\"tensors:\", tensor, {2: tensor * 2}, output_stream=sys.stdout)\n ```\n\n (This prints \"tensors: [0 1 2 ... 7 8 9] {2: [0 2 4 ... 14 16 18]}\" to\n sys.stdout)\n\n Changing the input separator:\n ```python\n tensor_a = tf.range(2)\n tensor_b = tensor_a * 2\n tf.print(tensor_a, tensor_b, output_stream=sys.stderr, sep=',')\n ```\n\n (This prints \"[0 1],[0 2]\" to sys.stderr)\n\n Usage in a `tf.function`:\n\n ```python\n @tf.function\n def f():\n tensor = tf.range(10)\n tf.print(tensor, output_stream=sys.stderr)\n return tensor\n\n range_tensor = f()\n ```\n\n (This prints \"[0 1 2 ... 7 8 9]\" to sys.stderr)\n\n *Compatibility usage in TF 1.x graphs*:\n\n In graphs manually created outside of `tf.function`, this method returns\n the created TF operator that prints the data. To make sure the\n operator runs, users need to pass the produced op to\n `tf.compat.v1.Session`'s run method, or to use the op as a control\n dependency for executed ops by specifying\n `with tf.compat.v1.control_dependencies([print_op])`.\n\n ```python\n tf.compat.v1.disable_v2_behavior() # for TF1 compatibility only\n\n sess = tf.compat.v1.Session()\n with sess.as_default():\n tensor = tf.range(10)\n print_op = tf.print(\"tensors:\", tensor, {2: tensor * 2},\n output_stream=sys.stdout)\n with tf.control_dependencies([print_op]):\n tripled_tensor = tensor * 3\n\n sess.run(tripled_tensor)\n ```\n\n (This prints \"tensors: [0 1 2 ... 7 8 9] {2: [0 2 4 ... 14 16 18]}\" to\n sys.stdout)\n\n Note: In Jupyter notebooks and colabs, `tf.print` prints to the notebook\n cell outputs. 
It will not write to the notebook kernel's console logs.\n\n Args:\n *inputs: Positional arguments that are the inputs to print. Inputs in the\n printed output will be separated by spaces. Inputs may be python\n primitives, tensors, data structures such as dicts and lists that may\n contain tensors (with the data structures possibly nested in arbitrary\n ways), and printable python objects.\n output_stream: The output stream, logging level, or file to print to.\n Defaults to sys.stderr, but sys.stdout, tf.compat.v1.logging.info,\n tf.compat.v1.logging.warning, tf.compat.v1.logging.error,\n absl.logging.info, absl.logging.warning and absl.logging.error are also\n supported. To print to a file, pass a string starting with \"file://\"\n followed by the file path, e.g., \"file:///tmp/foo.out\".\n summarize: The first and last `summarize` elements within each dimension are\n recursively printed per Tensor. If None, then the first 3 and last 3\n elements of each dimension are printed for each tensor. If set to -1, it\n will print all elements of every tensor.\n sep: The string to use to separate the inputs. Defaults to \" \".\n end: End character that is appended at the end of the printed string. Defaults\n to the newline character.\n name: A name for the operation (optional).\n\n Returns:\n None when executing eagerly. During graph tracing this returns\n a TF operator that prints the specified inputs in the specified output\n stream or logging level. This operator will be automatically executed\n except inside of `tf.compat.v1` graphs and sessions.\n\n Raises:\n ValueError: If an unsupported output stream is specified.\n "}, {"name": "py_function", "path": "./tf/py_function.md", "desc": "Wraps a python function into a TensorFlow op that executes it eagerly.", "type": "Functions", "docs": "Wraps a python function into a TensorFlow op that executes it eagerly.\n\n This function allows expressing computations in a TensorFlow graph as\n Python functions. In particular, it wraps a Python function `func`\n in a once-differentiable TensorFlow operation that executes it with eager\n execution enabled. As a consequence, `tf.py_function` makes it\n possible to express control flow using Python constructs (`if`, `while`,\n `for`, etc.), instead of TensorFlow control flow constructs (`tf.cond`,\n `tf.while_loop`). For example, you might use `tf.py_function` to\n implement the log Huber function:\n\n ```python\n def log_huber(x, m):\n if tf.abs(x) <= m:\n return x**2\n else:\n return m**2 * (1 - 2 * tf.math.log(m) + tf.math.log(x**2))\n\n x = tf.constant(1.0)\n m = tf.constant(2.0)\n\n with tf.GradientTape() as t:\n t.watch([x, m])\n y = tf.py_function(func=log_huber, inp=[x, m], Tout=tf.float32)\n\n dy_dx = t.gradient(y, x)\n assert dy_dx.numpy() == 2.0\n ```\n\n You can also use `tf.py_function` to debug your models at runtime\n using Python tools, i.e., you can isolate portions of your code that\n you want to debug, wrap them in Python functions and insert `pdb` tracepoints\n or print statements as desired, and wrap those functions in\n `tf.py_function`.\n\n For more information on eager execution, see the\n [Eager guide](https://tensorflow.org/guide/eager).\n\n `tf.py_function` is similar in spirit to `tf.compat.v1.py_func`, but unlike\n the latter, the former lets you use TensorFlow operations in the wrapped\n Python function.
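A hedged sketch of that difference (`scaled_softplus` is a made-up name; the wrapped body uses TensorFlow ops and the result stays once-differentiable):

```python
import tensorflow as tf

# Sketch: TF ops inside the wrapped function, with the gradient taken
# through the tf.py_function call.
def scaled_softplus(x):
    return tf.math.softplus(2.0 * x)

x = tf.constant(0.0)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.py_function(func=scaled_softplus, inp=[x], Tout=tf.float32)
print(y.numpy())                    # ~0.693, i.e. softplus(0) = ln 2
print(tape.gradient(y, x).numpy())  # ~1.0, i.e. 2 * sigmoid(0)
```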
In particular, while `tf.compat.v1.py_func` only runs on CPUs\n and wraps functions that take NumPy arrays as inputs and return NumPy arrays\n as outputs, `tf.py_function` can be placed on GPUs and wraps functions\n that take Tensors as inputs, execute TensorFlow operations in their bodies,\n and return Tensors as outputs.\n\n Note: We recommend against using `tf.py_function` outside of prototyping\n and experimentation due to the following known limitations:\n\n * Calling `tf.py_function` will acquire the Python Global Interpreter Lock\n (GIL) that allows only one thread to run at any point in time. This will\n preclude efficient parallelization and distribution of the execution of the\n program.\n\n * The body of the function (i.e. `func`) will not be serialized in a\n `GraphDef`. Therefore, you should not use this function if you need to\n serialize your model and restore it in a different environment.\n\n * The operation must run in the same address space as the Python program\n that calls `tf.py_function()`. If you are using distributed\n TensorFlow, you must run a `tf.distribute.Server` in the same process as the\n program that calls `tf.py_function()` and you must pin the created\n operation to a device in that server (e.g. using `with tf.device():`).\n\n * Currently `tf.py_function` is not compatible with XLA. Calling\n `tf.py_function` inside `tf.function(jit_compile=True)` will raise an\n error.\n\n Args:\n func: A Python function that accepts `inp` as arguments, and returns a\n value (or list of values) whose type is described by `Tout`.\n\n inp: Input arguments for `func`. A list whose elements are `Tensor`s or\n `CompositeTensors` (such as `tf.RaggedTensor`); or a single `Tensor` or\n `CompositeTensor`.\n\n Tout: The type(s) of the value(s) returned by `func`. One of the\n following.\n\n * If `func` returns a `Tensor` (or a value that can be converted to a\n Tensor): the `tf.DType` for that value.\n * If `func` returns a `CompositeTensor`: The `tf.TypeSpec` for that value.\n * If `func` returns `None`: the empty list (`[]`).\n * If `func` returns a list of `Tensor` and `CompositeTensor` values:\n a corresponding list of `tf.DType`s and `tf.TypeSpec`s for each value.\n\n name: A name for the operation (optional).\n\n Returns:\n The value(s) computed by `func`: a `Tensor`, `CompositeTensor`, or list of\n `Tensor` and `CompositeTensor`; or an empty list if `func` returns `None`.\n "}, {"name": "random_index_shuffle", "path": "./tf/random_index_shuffle.md", "desc": "Outputs the position of `value` in a permutation of [0, ..., max_index].", "type": "Functions", "docs": "Outputs the position of `value` in a permutation of [0, ..., max_index].\n\n Output values are a bijection of the `index` for any combination of `seed` and `max_index`.\n\n If multiple inputs are vectors (a matrix in the case of `seed`), then the size of the\n first dimension must match.\n\n The outputs are deterministic.\n\n Args:\n index: A `Tensor`. Must be one of the following types: `int32`, `uint32`, `int64`, `uint64`.\n A scalar tensor or a vector of dtype `dtype`. The index (or indices) to be shuffled. Must be within [0, max_index].\n seed: A `Tensor`. Must be one of the following types: `int32`, `uint32`, `int64`, `uint64`.\n A tensor of dtype `Tseed` and shape [3] or [n, 3]. The random seed.\n max_index: A `Tensor`. Must have the same type as `index`.\n A scalar tensor or vector of dtype `dtype`.
The upper bound(s) of the interval (inclusive).\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `index`.\n "}, {"name": "range", "path": "./tf/range.md", "desc": "Creates a sequence of numbers.", "type": "Functions", "docs": "Creates a sequence of numbers.\n\n Creates a sequence of numbers that begins at `start` and extends by\n increments of `delta` up to but not including `limit`.\n\n The dtype of the resulting tensor is inferred from the inputs unless\n it is provided explicitly.\n\n Like the Python builtin `range`, `start` defaults to 0, so that\n `range(n) = range(0, n)`.\n\n For example:\n\n >>> start = 3\n >>> limit = 18\n >>> delta = 3\n >>> tf.range(start, limit, delta)\n \n\n >>> start = 3\n >>> limit = 1\n >>> delta = -0.5\n >>> tf.range(start, limit, delta)\n \n\n >>> limit = 5\n >>> tf.range(limit)\n \n\n Args:\n start: A 0-D `Tensor` (scalar). Acts as first entry in the range if `limit`\n is not None; otherwise, acts as range limit and first entry defaults to 0.\n limit: A 0-D `Tensor` (scalar). Upper limit of sequence, exclusive. If None,\n defaults to the value of `start` while the first entry of the range\n defaults to 0.\n delta: A 0-D `Tensor` (scalar). Number that increments `start`. Defaults to\n 1.\n dtype: The type of the elements of the resulting tensor.\n name: A name for the operation. Defaults to \"range\".\n\n Returns:\n A 1-D `Tensor` of type `dtype`.\n\n @compatibility(numpy)\n Equivalent to np.arange\n @end_compatibility\n "}, {"name": "rank", "path": "./tf/rank.md", "desc": "Returns the rank of a tensor.", "type": "Functions", "docs": "Returns the rank of a tensor.\n\n See also `tf.shape`.\n\n Returns a 0-D `int32` `Tensor` representing the rank of `input`.\n\n For example:\n\n ```python\n # shape of tensor 't' is [2, 2, 3]\n t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n tf.rank(t) # 3\n ```\n\n **Note**: The rank of a tensor is not the same as the rank of a matrix. The\n rank of a tensor is the number of indices required to uniquely select each\n element of the tensor. Rank is also known as \"order\", \"degree\", or \"ndims.\"\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `int32`.\n\n @compatibility(numpy)\n Equivalent to np.ndim\n @end_compatibility\n "}, {"name": "realdiv", "path": "./tf/realdiv.md", "desc": "Returns x / y element-wise for real types.", "type": "Functions", "docs": "Returns x / y element-wise for real types.\n\n If `x` and `y` are reals, this will return the floating-point division.\n\n *NOTE*: `Div` supports broadcasting. More about broadcasting\n [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `uint32`, `uint64`, `int64`, `complex64`, `complex128`.\n y: A `Tensor`. Must have the same type as `x`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`.
Has the same type as `x`.\n "}, {"name": "recompute_grad", "path": "./tf/recompute_grad.md", "desc": "Defines a function as a recompute-checkpoint for the tape auto-diff.", "type": "Functions", "docs": "Defines a function as a recompute-checkpoint for the tape auto-diff.\n\n Tape checkpointing is a technique to reduce the memory consumption of the\n auto-diff tape:\n\n - Without tape checkpointing, operations and intermediate values are\n recorded to the tape for use in the backward pass.\n\n - With tape checkpointing, only the function call and its inputs are\n recorded. During back-propagation the `recompute_grad` custom gradient\n (`tf.custom_gradient`) recomputes the function under a localized Tape object.\n This recomputation of the function during backpropagation performs redundant\n calculation, but reduces the overall memory usage of the Tape.\n\n >>> y = tf.Variable(1.0)\n\n >>> def my_function(x):\n ... tf.print('running')\n ... z = x*y\n ... return z\n\n >>> my_function_recompute = tf.recompute_grad(my_function)\n\n >>> with tf.GradientTape() as tape:\n ... r = tf.constant(1.0)\n ... for i in range(4):\n ... r = my_function_recompute(r)\n running\n running\n running\n running\n\n >>> grad = tape.gradient(r, [y])\n running\n running\n running\n running\n\n Without `recompute_grad`, the tape contains all intermediate steps, and no\n recomputation is performed.\n\n >>> with tf.GradientTape() as tape:\n ... r = tf.constant(1.0)\n ... for i in range(4):\n ... r = my_function(r)\n running\n running\n running\n running\n\n >>> grad = tape.gradient(r, [y])\n\n\n If `f` was a `tf.keras` `Model` or `Layer` object, methods and attributes\n such as `f.variables` are not available on the returned function `g`.\n Either keep a reference to `f`, or use `g.__wrapped__` for accessing\n these variables and methods.\n\n\n >>> def print_running_and_return(x):\n ... tf.print(\"running\")\n ... return x\n\n >>> model = tf.keras.Sequential([\n ... tf.keras.layers.Lambda(print_running_and_return),\n ... tf.keras.layers.Dense(2)\n ... ])\n\n >>> model_recompute = tf.recompute_grad(model)\n\n >>> with tf.GradientTape(persistent=True) as tape:\n ... r = tf.constant([[1,2]])\n ... for i in range(4):\n ... r = model_recompute(r)\n running\n running\n running\n running\n\n >>> grad = tape.gradient(r, model.variables)\n running\n running\n running\n running\n\n Alternatively, use the `__wrapped__` attribute to access the original\n model object.\n\n >>> grad = tape.gradient(r, model_recompute.__wrapped__.variables)\n running\n running\n running\n running\n\n\n Args:\n f: function `f(*x)` that returns a `Tensor` or sequence of `Tensor` outputs.\n\n Returns:\n A function `g` wrapping `f` that defines a custom gradient, which recomputes\n `f` on the backwards pass of a gradient call.\n "}, {"name": "reduce_all", "path": "./tf/math/reduce_all.md", "desc": "Computes tf.math.logical_and of elements across dimensions of a tensor.", "type": "Functions", "docs": "Computes `tf.math.logical_and` of elements across dimensions of a tensor.\n\n This is the reduction operation for the elementwise `tf.math.logical_and` op.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n of the entries in `axis`, which must be unique.
If `keepdims` is true, the\n reduced dimensions are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n >>> x = tf.constant([[True, True], [False, False]])\n >>> tf.math.reduce_all(x)\n \n >>> tf.math.reduce_all(x, 0)\n \n >>> tf.math.reduce_all(x, 1)\n \n\n Args:\n input_tensor: The boolean tensor to reduce.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.all\n @end_compatibility\n "}, {"name": "reduce_any", "path": "./tf/math/reduce_any.md", "desc": "Computes tf.math.logical_or of elements across dimensions of a tensor.", "type": "Functions", "docs": "Computes `tf.math.logical_or` of elements across dimensions of a tensor.\n\n This is the reduction operation for the elementwise `tf.math.logical_or` op.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n of the entries in `axis`, which must be unique. If `keepdims` is true, the\n reduced dimensions are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n >>> x = tf.constant([[True, True], [False, False]])\n >>> tf.reduce_any(x)\n \n >>> tf.reduce_any(x, 0)\n \n >>> tf.reduce_any(x, 1)\n \n\n Args:\n input_tensor: The boolean tensor to reduce.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.any\n @end_compatibility\n "}, {"name": "reduce_logsumexp", "path": "./tf/math/reduce_logsumexp.md", "desc": "Computes log(sum(exp(elements across dimensions of a tensor))).", "type": "Functions", "docs": "Computes log(sum(exp(elements across dimensions of a tensor))).\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n of the entries in `axis`, which must be unique. If `keepdims` is true, the\n reduced dimensions are retained with length 1.\n\n If `axis` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n This function is more numerically stable than log(sum(exp(input))). It avoids\n overflows caused by taking the exp of large inputs and underflows caused by\n taking the log of small inputs.\n\n For example:\n\n ```python\n x = tf.constant([[0., 0., 0.], [0., 0., 0.]])\n tf.reduce_logsumexp(x) # log(6)\n tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)]\n tf.reduce_logsumexp(x, 1) # [log(3), log(3)]\n tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]]\n tf.reduce_logsumexp(x, [0, 1]) # log(6)\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions.
Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n "}, {"name": "reduce_max", "path": "./tf/math/reduce_max.md", "desc": "Computes tf.math.maximum of elements across dimensions of a tensor.", "type": "Functions", "docs": "Computes `tf.math.maximum` of elements across dimensions of a tensor.\n\n This is the reduction operation for the elementwise `tf.math.maximum` op.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n of the entries in `axis`, which must be unique. If `keepdims` is true, the\n reduced dimensions are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Usage example:\n\n >>> x = tf.constant([5, 1, 2, 4])\n >>> tf.reduce_max(x)\n \n >>> x = tf.constant([-5, -1, -2, -4])\n >>> tf.reduce_max(x)\n \n >>> x = tf.constant([4, float('nan')])\n >>> tf.reduce_max(x)\n \n >>> x = tf.constant([float('nan'), float('nan')])\n >>> tf.reduce_max(x)\n \n >>> x = tf.constant([float('-inf'), float('inf')])\n >>> tf.reduce_max(x)\n \n\n See the numpy docs for `np.amax` and `np.nanmax` behavior.\n\n Args:\n input_tensor: The tensor to reduce. Should have real numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n "}, {"name": "reduce_mean", "path": "./tf/math/reduce_mean.md", "desc": "Computes the mean of elements across dimensions of a tensor.", "type": "Functions", "docs": "Computes the mean of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis` by computing the\n mean of elements across the dimensions in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n of the entries in `axis`, which must be unique. If `keepdims` is true, the\n reduced dimensions are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a tensor with a single\n element is returned.\n\n For example:\n\n >>> x = tf.constant([[1., 1.], [2., 2.]])\n >>> tf.reduce_mean(x)\n \n >>> tf.reduce_mean(x, 0)\n \n >>> tf.reduce_mean(x, 1)\n \n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.mean\n\n Please note that `np.mean` has a `dtype` parameter that could be used to\n specify the output type. By default this is `dtype=float64`. 
On the other\n hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,\n for example:\n\n >>> x = tf.constant([1, 0, 1, 0])\n >>> tf.reduce_mean(x)\n \n >>> y = tf.constant([1., 0., 1., 0.])\n >>> tf.reduce_mean(y)\n \n\n @end_compatibility\n "}, {"name": "reduce_min", "path": "./tf/math/reduce_min.md", "desc": "Computes the tf.math.minimum of elements across dimensions of a tensor.", "type": "Functions", "docs": "Computes the `tf.math.minimum` of elements across dimensions of a tensor.\n\n This is the reduction operation for the elementwise `tf.math.minimum` op.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n of the entries in `axis`, which must be unique. If `keepdims` is true, the\n reduced dimensions are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n >>> a = tf.constant([\n ... [[1, 2], [3, 4]],\n ... [[1, 2], [3, 4]]\n ... ])\n >>> tf.reduce_min(a)\n \n\n Choosing a specific axis returns minimum element in the given axis:\n\n >>> b = tf.constant([[1, 2, 3], [4, 5, 6]])\n >>> tf.reduce_min(b, axis=0)\n \n >>> tf.reduce_min(b, axis=1)\n \n\n Setting `keepdims` to `True` retains the dimension of `input_tensor`:\n\n >>> tf.reduce_min(a, keepdims=True)\n \n >>> tf.math.reduce_min(a, axis=0, keepdims=True)\n \n\n Args:\n input_tensor: The tensor to reduce. Should have real numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.min\n @end_compatibility\n "}, {"name": "reduce_prod", "path": "./tf/math/reduce_prod.md", "desc": "Computes tf.math.multiply of elements across dimensions of a tensor.", "type": "Functions", "docs": "Computes `tf.math.multiply` of elements across dimensions of a tensor.\n\n This is the reduction operation for the elementwise `tf.math.multiply` op.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n >>> x = tf.constant([[1., 2.], [3., 4.]])\n >>> tf.math.reduce_prod(x)\n \n >>> tf.math.reduce_prod(x, 0)\n \n >>> tf.math.reduce_prod(x, 1)\n \n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. 
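The `reduce_mean` compatibility note above is easy to trip over: with integer inputs the mean is computed in the input dtype. A two-line sketch; cast first when a fractional result is intended.

```python
import tensorflow as tf

x = tf.constant([1, 0, 1, 0])
print(tf.reduce_mean(x).numpy())                       # 0: int32 arithmetic
print(tf.reduce_mean(tf.cast(x, tf.float32)).numpy())  # 0.5
```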
Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.prod\n @end_compatibility\n "}, {"name": "reduce_sum", "path": "./tf/math/reduce_sum.md", "desc": "Computes the sum of elements across dimensions of a tensor.", "type": "Functions", "docs": "Computes the sum of elements across dimensions of a tensor.\n\n This is the reduction operation for the elementwise `tf.math.add` op.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n of the entries in `axis`, which must be unique. If `keepdims` is true, the\n reduced dimensions are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n >>> # x has a shape of (2, 3) (two rows and three columns):\n >>> x = tf.constant([[1, 1, 1], [1, 1, 1]])\n >>> x.numpy()\n array([[1, 1, 1],\n [1, 1, 1]], dtype=int32)\n >>> # sum all the elements\n >>> # 1 + 1 + 1 + 1 + 1+ 1 = 6\n >>> tf.reduce_sum(x).numpy()\n 6\n >>> # reduce along the first dimension\n >>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2]\n >>> tf.reduce_sum(x, 0).numpy()\n array([2, 2, 2], dtype=int32)\n >>> # reduce along the second dimension\n >>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3]\n >>> tf.reduce_sum(x, 1).numpy()\n array([3, 3], dtype=int32)\n >>> # keep the original dimensions\n >>> tf.reduce_sum(x, 1, keepdims=True).numpy()\n array([[3],\n [3]], dtype=int32)\n >>> # reduce along both dimensions\n >>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6\n >>> # or, equivalently, reduce along rows, then reduce the resultant array\n >>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2]\n >>> # 2 + 2 + 2 = 6\n >>> tf.reduce_sum(x, [0, 1]).numpy()\n 6\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor)]`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor, of the same dtype as the input_tensor.\n\n @compatibility(numpy)\n Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to\n int64 while tensorflow returns the same dtype as the input.\n @end_compatibility\n "}, {"name": "register_tensor_conversion_function", "path": "./tf/register_tensor_conversion_function.md", "desc": "Registers a function for converting objects of `base_type` to `Tensor`.", "type": "Functions", "docs": "Registers a function for converting objects of `base_type` to `Tensor`.\n\n The conversion function must have the following signature:\n\n ```python\n def conversion_func(value, dtype=None, name=None, as_ref=False):\n # ...\n ```\n\n It must return a `Tensor` with the given `dtype` if specified. If the\n conversion function creates a new `Tensor`, it should use the given\n `name` if specified. All exceptions will be propagated to the caller.\n\n The conversion function may return `NotImplemented` for some\n inputs. 
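A minimal registration sketch for the API above; the `Celsius` class and its converter are hypothetical, invented only to illustrate the documented signature and the `NotImplemented` escape hatch.

```python
import tensorflow as tf

class Celsius:
    """Hypothetical wrapper type used only for this illustration."""
    def __init__(self, degrees):
        self.degrees = degrees

def celsius_converter(value, dtype=None, name=None, as_ref=False):
    if as_ref:
        # Decline ref conversions; later converters may handle them.
        return NotImplemented
    return tf.convert_to_tensor(value.degrees, dtype=dtype, name=name)

tf.register_tensor_conversion_function(Celsius, celsius_converter)

# Ops that accept tensor-convertible values now accept Celsius too:
print(tf.add(Celsius(20.5), 1.0))  # tf.Tensor(21.5, shape=(), dtype=float32)
```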
In this case, the conversion process will continue to try\n subsequent conversion functions.\n\n If `as_ref` is true, the function must return a `Tensor` reference,\n such as a `Variable`.\n\n NOTE: The conversion functions will execute in order of priority,\n followed by order of registration. To ensure that a conversion function\n `F` runs before another conversion function `G`, ensure that `F` is\n registered with a smaller priority than `G`.\n\n Args:\n base_type: The base type or tuple of base types for all objects that\n `conversion_func` accepts.\n conversion_func: A function that converts instances of `base_type` to\n `Tensor`.\n priority: Optional integer that indicates the priority for applying this\n conversion function. Conversion functions with smaller priority values run\n earlier than conversion functions with larger priority values. Defaults to\n 100.\n\n Raises:\n TypeError: If the arguments do not have the appropriate type.\n "}, {"name": "repeat", "path": "./tf/repeat.md", "desc": "Repeat elements of `input`.", "type": "Functions", "docs": "Repeat elements of `input`.\n\n See also `tf.concat`, `tf.stack`, `tf.tile`.\n\n Args:\n input: An `N`-dimensional Tensor.\n repeats: An 1-D `int` Tensor. The number of repetitions for each element.\n repeats is broadcasted to fit the shape of the given axis. `len(repeats)`\n must equal `input.shape[axis]` if axis is not None.\n axis: An int. The axis along which to repeat values. By default (axis=None),\n use the flattened input array, and return a flat output array.\n name: A name for the operation.\n\n Returns:\n A Tensor which has the same shape as `input`, except along the given axis.\n If axis is None then the output array is flattened to match the flattened\n input array.\n\n Example usage:\n\n >>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)\n \n\n >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0)\n \n\n >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1)\n \n\n >>> repeat(3, repeats=4)\n \n\n >>> repeat([[1,2], [3,4]], repeats=2)\n \n\n "}, {"name": "required_space_to_batch_paddings", "path": "./tf/required_space_to_batch_paddings.md", "desc": "Calculate padding required to make block_shape divide input_shape.", "type": "Functions", "docs": "Calculate padding required to make block_shape divide input_shape.\n\n This function can be used to calculate a suitable paddings argument for use\n with space_to_batch_nd and batch_to_space_nd.\n\n Args:\n input_shape: int32 Tensor of shape [N].\n block_shape: int32 Tensor of shape [N].\n base_paddings: Optional int32 Tensor of shape [N, 2]. Specifies the minimum\n amount of padding to use. All elements must be >= 0. If not specified,\n defaults to 0.\n name: string. Optional name prefix.\n\n Returns:\n (paddings, crops), where:\n\n `paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]\n satisfying:\n\n paddings[i, 0] = base_paddings[i, 0].\n 0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]\n (input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0\n\n crops[i, 0] = 0\n crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]\n\n Raises: ValueError if called with incompatible shapes.\n "}, {"name": "reshape", "path": "./tf/reshape.md", "desc": "Reshapes a tensor.", "type": "Functions", "docs": "Reshapes a tensor.\n\n Given `tensor`, this operation returns a new `tf.Tensor` that has the same\n values as `tensor` in the same order, except with a new shape given by\n `shape`.\n\n >>> t1 = [[1, 2, 3],\n ... 
[4, 5, 6]]\n >>> print(tf.shape(t1).numpy())\n [2 3]\n >>> t2 = tf.reshape(t1, [6])\n >>> t2\n \n >>> tf.reshape(t2, [3, 2])\n \n\n The `tf.reshape` does not change the order of or the total number of elements\n in the tensor, and so it can reuse the underlying data buffer. This makes it\n a fast operation independent of how big of a tensor it is operating on.\n\n >>> tf.reshape([1, 2, 3], [2, 2])\n Traceback (most recent call last):\n ...\n InvalidArgumentError: Input to reshape is a tensor with 3 values, but the\n requested shape has 4\n\n To instead reorder the data to rearrange the dimensions of a tensor, see\n `tf.transpose`.\n\n >>> t = [[1, 2, 3],\n ... [4, 5, 6]]\n >>> tf.reshape(t, [3, 2]).numpy()\n array([[1, 2],\n [3, 4],\n [5, 6]], dtype=int32)\n >>> tf.transpose(t, perm=[1, 0]).numpy()\n array([[1, 4],\n [2, 5],\n [3, 6]], dtype=int32)\n\n If one component of `shape` is the special value -1, the size of that\n dimension is computed so that the total size remains constant. In particular,\n a `shape` of `[-1]` flattens into 1-D. At most one component of `shape` can\n be -1.\n\n >>> t = [[1, 2, 3],\n ... [4, 5, 6]]\n >>> tf.reshape(t, [-1])\n \n >>> tf.reshape(t, [3, -1])\n \n >>> tf.reshape(t, [-1, 2])\n \n\n `tf.reshape(t, [])` reshapes a tensor `t` with one element to a scalar.\n\n >>> tf.reshape([7], []).numpy()\n 7\n\n More examples:\n\n >>> t = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n >>> print(tf.shape(t).numpy())\n [9]\n >>> tf.reshape(t, [3, 3])\n \n\n >>> t = [[[1, 1], [2, 2]],\n ... [[3, 3], [4, 4]]]\n >>> print(tf.shape(t).numpy())\n [2 2 2]\n >>> tf.reshape(t, [2, 4])\n \n\n >>> t = [[[1, 1, 1],\n ... [2, 2, 2]],\n ... [[3, 3, 3],\n ... [4, 4, 4]],\n ... [[5, 5, 5],\n ... [6, 6, 6]]]\n >>> print(tf.shape(t).numpy())\n [3 2 3]\n >>> # Pass '[-1]' to flatten 't'.\n >>> tf.reshape(t, [-1])\n \n >>> # -- Using -1 to infer the shape --\n >>> # Here -1 is inferred to be 9:\n >>> tf.reshape(t, [2, -1])\n \n >>> # -1 is inferred to be 2:\n >>> tf.reshape(t, [-1, 9])\n \n >>> # -1 is inferred to be 3:\n >>> tf.reshape(t, [ 2, -1, 3])\n \n\n Args:\n tensor: A `Tensor`.\n shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Defines the shape of the output tensor.\n name: Optional string. A name for the operation.\n\n Returns:\n A `Tensor`. Has the same type as `tensor`.\n "}, {"name": "reverse", "path": "./tf/reverse.md", "desc": "Reverses specific dimensions of a tensor.", "type": "Functions", "docs": "Reverses specific dimensions of a tensor.\n\n Given a `tensor`, and a `int32` tensor `axis` representing the set of\n dimensions of `tensor` to reverse. This operation reverses each dimension\n `i` for which there exists `j` s.t. `axis[j] == i`.\n\n `tensor` can have up to 8 dimensions. The number of dimensions specified\n in `axis` may be 0 or more entries. 
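Since the `tf.reverse` example further below is written as a non-runnable comment sketch, here is a small runnable complement (plain TF 2.x, no extra assumptions):

```python
import tensorflow as tf

t = tf.reshape(tf.range(6), [2, 3])      # [[0, 1, 2], [3, 4, 5]]
print(tf.reverse(t, axis=[0]).numpy())   # [[3 4 5], [0 1 2]]
print(tf.reverse(t, axis=[1]).numpy())   # [[2 1 0], [5 4 3]]
print(tf.reverse(t, axis=[-1]).numpy())  # negative axes wrap: same as [1]
```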
If an index is specified more than\n once, a InvalidArgument error is raised.\n\n For example:\n\n ```\n # tensor 't' is [[[[ 0, 1, 2, 3],\n # [ 4, 5, 6, 7],\n # [ 8, 9, 10, 11]],\n # [[12, 13, 14, 15],\n # [16, 17, 18, 19],\n # [20, 21, 22, 23]]]]\n # tensor 't' shape is [1, 2, 3, 4]\n\n # 'dims' is [3] or 'dims' is [-1]\n reverse(t, dims) ==> [[[[ 3, 2, 1, 0],\n [ 7, 6, 5, 4],\n [ 11, 10, 9, 8]],\n [[15, 14, 13, 12],\n [19, 18, 17, 16],\n [23, 22, 21, 20]]]]\n\n # 'dims' is '[1]' (or 'dims' is '[-3]')\n reverse(t, dims) ==> [[[[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]\n [[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]]]]\n\n # 'dims' is '[2]' (or 'dims' is '[-2]')\n reverse(t, dims) ==> [[[[8, 9, 10, 11],\n [4, 5, 6, 7],\n [0, 1, 2, 3]]\n [[20, 21, 22, 23],\n [16, 17, 18, 19],\n [12, 13, 14, 15]]]]\n ```\n\n Args:\n tensor: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `uint16`, `int16`, `int32`, `uint32`, `int64`, `uint64`, `bool`, `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`, `string`.\n Up to 8-D.\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n 1-D. The indices of the dimensions to reverse. Must be in the range\n `[-rank(tensor), rank(tensor))`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `tensor`.\n "}, {"name": "reverse_sequence", "path": "./tf/reverse_sequence.md", "desc": "Reverses variable length slices.", "type": "Functions", "docs": "Reverses variable length slices.\n\n This op first slices `input` along the dimension `batch_axis`, and for\n each slice `i`, reverses the first `seq_lengths[i]` elements along the\n dimension `seq_axis`.\n\n The elements of `seq_lengths` must obey `seq_lengths[i] <=\n input.dims[seq_axis]`, and `seq_lengths` must be a vector of length\n `input.dims[batch_axis]`.\n\n The output slice `i` along dimension `batch_axis` is then given by\n input slice `i`, with the first `seq_lengths[i]` slices along\n dimension `seq_axis` reversed.\n\n Example usage:\n\n >>> seq_lengths = [7, 2, 3, 5]\n >>> input = [[1, 2, 3, 4, 5, 0, 0, 0], [1, 2, 0, 0, 0, 0, 0, 0],\n ... [1, 2, 3, 4, 0, 0, 0, 0], [1, 2, 3, 4, 5, 6, 7, 8]]\n >>> output = tf.reverse_sequence(input, seq_lengths, seq_axis=1, batch_axis=0)\n >>> output\n \n\n Args:\n input: A `Tensor`. The input to reverse.\n seq_lengths: A `Tensor`. Must be one of the following types: `int32`,\n `int64`. 1-D with length `input.dims(batch_axis)` and `max(seq_lengths) <=\n input.dims(seq_axis)`\n seq_axis: An `int`. The dimension which is partially reversed.\n batch_axis: An optional `int`. Defaults to `0`. The dimension along which\n reversal is performed.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor. Has the same type as input.\n "}, {"name": "roll", "path": "./tf/roll.md", "desc": "Rolls the elements of a tensor along an axis.", "type": "Functions", "docs": "Rolls the elements of a tensor along an axis.\n\n The elements are shifted positively (towards larger indices) by the offset of\n `shift` along the dimension of `axis`. Negative `shift` values will shift\n elements in the opposite direction. Elements that roll passed the last position\n will wrap around to the first and vice versa. 
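The wrap-around behaviour of `tf.roll` described above is, for a positive 1-D shift, the same as concatenating the tail slice in front of the rest; a short sketch:

```python
import tensorflow as tf

t = tf.range(5)                             # [0, 1, 2, 3, 4]
print(tf.roll(t, shift=2, axis=0).numpy())  # [3 4 0 1 2]

# Equivalent view of a positive 1-D roll by s: the last s elements
# wrap around to the front.
s = 2
print(tf.concat([t[-s:], t[:-s]], axis=0).numpy())  # [3 4 0 1 2]
```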
Multiple shifts along multiple\n axes may be specified.\n\n For example:\n\n ```\n # 't' is [0, 1, 2, 3, 4]\n roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]\n\n # shifting along multiple dimensions\n # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]\n roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]\n\n # shifting along the same axis multiple times\n # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]\n roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]\n ```\n\n Args:\n input: A `Tensor`.\n shift: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which\n elements are shifted positively (towards larger indices) along the dimension\n specified by `axis[i]`. Negative shifts will roll the elements in the opposite\n direction.\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension that the shift\n `shift[i]` should occur. If the same axis is referenced more than once, the\n total shift for that axis will be the sum of all the shifts that belong to that\n axis.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n "}, {"name": "round", "path": "./tf/math/round.md", "desc": "Rounds the values of a tensor to the nearest integer, element-wise.", "type": "Functions", "docs": "Rounds the values of a tensor to the nearest integer, element-wise.\n\n Rounds half to even. Also known as bankers rounding. If you want to round\n according to the current system rounding mode use tf::cint.\n For example:\n\n ```python\n x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])\n tf.round(x) # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]\n ```\n\n Args:\n x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or `int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of same shape and type as `x`.\n "}, {"name": "saturate_cast", "path": "./tf/dtypes/saturate_cast.md", "desc": "Performs a safe saturating cast of `value` to `dtype`.", "type": "Functions", "docs": "Performs a safe saturating cast of `value` to `dtype`.\n\n This function casts the input to `dtype` without applying any scaling. If\n there is a danger that values would over or underflow in the cast, this op\n applies the appropriate clamping before the cast.\n\n Args:\n value: A `Tensor`.\n dtype: The desired output `DType`.\n name: A name for the operation (optional).\n\n Returns:\n `value` safely cast to `dtype`.\n "}, {"name": "scalar_mul", "path": "./tf/math/scalar_mul.md", "desc": "Multiplies a scalar times a `Tensor` or `IndexedSlices` object.", "type": "Functions", "docs": "Multiplies a scalar times a `Tensor` or `IndexedSlices` object.\n\n This is a special case of `tf.math.multiply`, where the first value must be a\n `scalar`. Unlike the general form of `tf.math.multiply`, this is operation is\n guaranteed to be efficient for `tf.IndexedSlices`.\n\n >>> x = tf.reshape(tf.range(30, dtype=tf.float32), [10, 3])\n >>> with tf.GradientTape() as g:\n ... g.watch(x)\n ... y = tf.gather(x, [1, 2]) # IndexedSlices\n ... z = tf.math.scalar_mul(10.0, y)\n\n Args:\n scalar: A 0-D scalar `Tensor`. 
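`tf.dtypes.saturate_cast` above ships without an example; a minimal sketch of the clamping behaviour (the input values are chosen arbitrarily):

```python
import tensorflow as tf

x = tf.constant([-300.0, 0.5, 300.0])
# Out-of-range values are clamped to int8's [-128, 127] before the
# cast, so the result cannot wrap around.
print(tf.dtypes.saturate_cast(x, tf.int8).numpy())  # [-128    0  127]
```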
Must have known shape.\n x: A `Tensor` or `IndexedSlices` to be scaled.\n name: A name for the operation (optional).\n\n Returns:\n `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.\n\n Raises:\n ValueError: if scalar is not a 0-D `scalar`.\n "}, {"name": "scan", "path": "./tf/scan.md", "desc": "scan on the list of tensors unpacked from `elems` on dimension 0. (deprecated argument values", "type": "Functions", "docs": "scan on the list of tensors unpacked from `elems` on dimension 0. (deprecated argument values)\n\nDeprecated: SOME ARGUMENT VALUES ARE DEPRECATED: `(back_prop=False)`. They will be removed in a future version.\nInstructions for updating:\nback_prop=False is deprecated. Consider using tf.stop_gradient instead.\nInstead of:\nresults = tf.scan(fn, elems, back_prop=False)\nUse:\nresults = tf.nest.map_structure(tf.stop_gradient, tf.scan(fn, elems))\n\nThe simplest version of `scan` repeatedly applies the callable `fn` to a\nsequence of elements from first to last. The elements are made of the tensors\nunpacked from `elems` on dimension 0. The callable fn takes two tensors as\narguments. The first argument is the accumulated value computed from the\npreceding invocation of fn, and the second is the value at the current\nposition of `elems`. If `initializer` is None, `elems` must contain at least\none element, and its first element is used as the initializer.\n\nSuppose that `elems` is unpacked into `values`, a list of tensors. The shape\nof the result tensor is `[len(values)] + fn(initializer, values[0]).shape`.\nIf reverse=True, it's fn(initializer, values[-1]).shape.\n\nThis method also allows multi-arity `elems` and accumulator. If `elems`\nis a (possibly nested) list or tuple of tensors, then each of these tensors\nmust have a matching first (unpack) dimension. The second argument of\n`fn` must match the structure of `elems`.\n\nIf no `initializer` is provided, the output structure and dtypes of `fn`\nare assumed to be the same as its input; and in this case, the first\nargument of `fn` must match the structure of `elems`.\n\nIf an `initializer` is provided, then the output of `fn` must have the same\nstructure as `initializer`; and the first argument of `fn` must match\nthis structure.\n\nFor example, if `elems` is `(t1, [t2, t3])` and `initializer` is\n`[i1, i2]` then an appropriate signature for `fn` in `python2` is:\n`fn = lambda (acc_p1, acc_p2), (t1, [t2, t3]):` and `fn` must return a list,\n`[acc_n1, acc_n2]`. An alternative correct signature for `fn`, and the\n one that works in `python3`, is:\n`fn = lambda a, t:`, where `a` and `t` correspond to the input tuples.\n\nArgs:\n fn: The callable to be performed. It accepts two arguments. The first will\n have the same structure as `initializer` if one is provided, otherwise it\n will have the same structure as `elems`. The second will have the same\n (possibly nested) structure as `elems`. Its output must have the same\n structure as `initializer` if one is provided, otherwise it must have the\n same structure as `elems`.\n elems: A tensor or (possibly nested) sequence of tensors, each of which will\n be unpacked along their first dimension. The nested sequence of the\n resulting slices will be the first argument to `fn`.\n initializer: (optional) A tensor or (possibly nested) sequence of tensors,\n initial value for the accumulator, and the expected output type of `fn`.\n parallel_iterations: (optional) The number of iterations allowed to run in\n parallel.\n back_prop: (optional) Deprecated. 
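The `back_prop=False` migration recommended in the deprecation note above, as a runnable cumulative-sum sketch:

```python
import numpy as np
import tensorflow as tf

elems = np.array([1, 2, 3, 4, 5, 6])

# Instead of tf.scan(fn, elems, back_prop=False), block gradients
# explicitly on the (possibly nested) result:
result = tf.nest.map_structure(
    tf.stop_gradient, tf.scan(lambda a, x: a + x, elems))
print(result.numpy())  # [ 1  3  6 10 15 21]
```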
False disables support for back\n propagation. Prefer using `tf.stop_gradient` instead.\n swap_memory: (optional) True enables GPU-CPU memory swapping.\n infer_shape: (optional) False disables tests for consistent output shapes.\n reverse: (optional) True scans the tensor last to first (instead of first to\n last).\n name: (optional) Name prefix for the returned tensors.\n\nReturns:\n A tensor or (possibly nested) sequence of tensors. Each tensor packs the\n results of applying `fn` to tensors unpacked from `elems` along the first\n dimension, and the previous accumulator value(s), from first to last (or\n last to first, if `reverse=True`).\n\nRaises:\n TypeError: if `fn` is not callable or the structure of the output of\n `fn` and `initializer` do not match.\n ValueError: if the lengths of the output of `fn` and `initializer`\n do not match.\n\nExamples:\n ```python\n elems = np.array([1, 2, 3, 4, 5, 6])\n sum = scan(lambda a, x: a + x, elems)\n # sum == [1, 3, 6, 10, 15, 21]\n sum = scan(lambda a, x: a + x, elems, reverse=True)\n # sum == [21, 20, 18, 15, 11, 6]\n ```\n\n ```python\n elems = np.array([1, 2, 3, 4, 5, 6])\n initializer = np.array(0)\n sum_one = scan(\n lambda a, x: x[0] - x[1] + a, (elems + 1, elems), initializer)\n # sum_one == [1, 2, 3, 4, 5, 6]\n ```\n\n ```python\n elems = np.array([1, 0, 0, 0, 0, 0])\n initializer = (np.array(0), np.array(1))\n fibonaccis = scan(lambda a, _: (a[1], a[0] + a[1]), elems, initializer)\n # fibonaccis == ([1, 1, 2, 3, 5, 8], [1, 2, 3, 5, 8, 13])\n ```"}, {"name": "scatter_nd", "path": "./tf/scatter_nd.md", "desc": "Scatters `updates` into a tensor of shape `shape` according to `indices`.", "type": "Functions", "docs": "Scatters `updates` into a tensor of shape `shape` according to `indices`.\n\n Update the input tensor by scattering sparse `updates` according to individual values at the specified `indices`.\n This op returns an `output` tensor with the `shape` you specify. This op is the\n inverse of the `tf.gather_nd` operator which extracts values or slices from a\n given tensor.\n\n This operation is similar to `tf.tensor_scatter_nd_add`, except that the tensor\n is zero-initialized. Calling `tf.scatter_nd(indices, values, shape)`\n is identical to calling\n `tf.tensor_scatter_nd_add(tf.zeros(shape, values.dtype), indices, values)`\n\n If `indices` contains duplicates, the duplicate `values` are accumulated\n (summed).\n\n **WARNING**: The order in which updates are applied is nondeterministic, so the\n output will be nondeterministic if `indices` contains duplicates;\n numbers summed in different order may yield different results because of some\n numerical approximation issues.\n\n `indices` is an integer tensor of shape `shape`. The last dimension\n of `indices` can be at most the rank of `shape`:\n\n indices.shape[-1] <= shape.rank\n\n The last dimension of `indices` corresponds to indices of elements\n (if `indices.shape[-1] = shape.rank`) or slices\n (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of\n `shape`.\n\n `updates` is a tensor with shape:\n\n indices.shape[:-1] + shape[indices.shape[-1]:]\n\n The simplest form of the scatter op is to insert individual elements in\n a tensor by index. Consider an example where you want to insert 4 scattered\n elements in a rank-1 tensor with 8 elements.\n\n
\n\n In Python, this scatter operation would look like this:\n\n ```python\n indices = tf.constant([[4], [3], [1], [7]])\n updates = tf.constant([9, 10, 11, 12])\n shape = tf.constant([8])\n scatter = tf.scatter_nd(indices, updates, shape)\n print(scatter)\n ```\n\n The resulting tensor would look like this:\n\n [0, 11, 0, 10, 9, 0, 0, 12]\n\n You can also insert entire slices of a higher rank tensor all at once. For\n example, you can insert two slices in the first dimension of a rank-3 tensor\n with two matrices of new values.\n\n
\n\n In Python, this scatter operation would look like this:\n\n ```python\n indices = tf.constant([[0], [2]])\n updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]],\n [[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]]])\n shape = tf.constant([4, 4, 4])\n scatter = tf.scatter_nd(indices, updates, shape)\n print(scatter)\n ```\n\n The resulting tensor would look like this:\n\n [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]\n\n Note that on CPU, if an out of bound index is found, an error is returned.\n On GPU, if an out of bound index is found, the index is ignored.\n\n Args:\n indices: A `Tensor`. Must be one of the following types: `int16`, `int32`, `int64`.\n Tensor of indices.\n updates: A `Tensor`. Values to scatter into the output tensor.\n shape: A `Tensor`. Must have the same type as `indices`.\n 1-D. The shape of the output tensor.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `updates`.\n "}, {"name": "searchsorted", "path": "./tf/searchsorted.md", "desc": "Searches for where a value would go in a sorted sequence.", "type": "Functions", "docs": "Searches for where a value would go in a sorted sequence.\n\n This is not a method for checking containment (like python `in`).\n\n The typical use case for this operation is \"binning\", \"bucketing\", or\n \"discretizing\". The `values` are assigned to bucket-indices based on the\n **edges** listed in `sorted_sequence`. This operation\n returns the bucket-index for each value.\n\n >>> edges = [-1, 3.3, 9.1, 10.0]\n >>> values = [0.0, 4.1, 12.0]\n >>> tf.searchsorted(edges, values).numpy()\n array([1, 2, 4], dtype=int32)\n\n The `side` argument controls which index is returned if a value lands exactly\n on an edge:\n\n >>> seq = [0, 3, 9, 10, 10]\n >>> values = [0, 4, 10]\n >>> tf.searchsorted(seq, values).numpy()\n array([0, 2, 3], dtype=int32)\n >>> tf.searchsorted(seq, values, side=\"right\").numpy()\n array([1, 2, 5], dtype=int32)\n\n The `axis` is not settable for this operation. It always operates on the\n innermost dimension (`axis=-1`). The operation will accept any number of\n outer dimensions. Here it is applied to the rows of a matrix:\n\n >>> sorted_sequence = [[0., 3., 8., 9., 10.],\n ... [1., 2., 3., 4., 5.]]\n >>> values = [[9.8, 2.1, 4.3],\n ... [0.1, 6.6, 4.5, ]]\n >>> tf.searchsorted(sorted_sequence, values).numpy()\n array([[4, 1, 2],\n [0, 5, 4]], dtype=int32)\n\n Note: This operation assumes that `sorted_sequence` **is sorted** along the\n innermost axis, maybe using `tf.sort(..., axis=-1)`. **If the sequence is not\n sorted no error is raised** and the content of the returned tensor is not well\n defined.\n\n Args:\n sorted_sequence: N-D `Tensor` containing a sorted sequence.\n values: N-D `Tensor` containing the search values.\n side: 'left' or 'right'; 'left' corresponds to lower_bound and 'right' to\n upper_bound.\n out_type: The output type (`int32` or `int64`). Default is `tf.int32`.\n name: Optional name for the operation.\n\n Returns:\n An N-D `Tensor` the size of `values` containing the result of applying\n either lower_bound or upper_bound (depending on side) to each value. 
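A small bucketing sketch for `tf.searchsorted` above, showing how `side` changes the index returned for a value that lands exactly on an edge:

```python
import tensorflow as tf

edges = tf.constant([0.0, 1.0, 2.5, 7.0])
values = tf.constant([0.2, 1.0, 3.0, 6.4])
# 'left' returns the lower bound, 'right' the upper bound, so they
# differ only for the exact hit on edge 1.0:
print(tf.searchsorted(edges, values).numpy())                # [1 1 3 3]
print(tf.searchsorted(edges, values, side='right').numpy())  # [1 2 3 3]
```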
The\n result is not a global index to the entire `Tensor`, but the index in the\n last dimension.\n\n Raises:\n ValueError: If the last dimension of `sorted_sequence >= 2^31-1` elements.\n If the total size of `values` exceeds `2^31 - 1` elements.\n If the first `N-1` dimensions of the two tensors don't match.\n "}, {"name": "sequence_mask", "path": "./tf/sequence_mask.md", "desc": "Returns a mask tensor representing the first N positions of each cell.", "type": "Functions", "docs": "Returns a mask tensor representing the first N positions of each cell.\n\n If `lengths` has shape `[d_1, d_2, ..., d_n]` the resulting tensor `mask` has\n dtype `dtype` and shape `[d_1, d_2, ..., d_n, maxlen]`, with\n\n ```\n mask[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])\n ```\n\n Examples:\n\n ```python\n tf.sequence_mask([1, 3, 2], 5) # [[True, False, False, False, False],\n # [True, True, True, False, False],\n # [True, True, False, False, False]]\n\n tf.sequence_mask([[1, 3],[2,0]]) # [[[True, False, False],\n # [True, True, True]],\n # [[True, True, False],\n # [False, False, False]]]\n ```\n\n Args:\n lengths: integer tensor, all its values <= maxlen.\n maxlen: scalar integer tensor, size of last dimension of returned tensor.\n Default is the maximum value in `lengths`.\n dtype: output type of the resulting tensor.\n name: name of the op.\n\n Returns:\n A mask tensor of shape `lengths.shape + (maxlen,)`, cast to specified dtype.\n Raises:\n ValueError: if `maxlen` is not a scalar.\n "}, {"name": "shape", "path": "./tf/shape.md", "desc": "Returns a tensor containing the shape of the input tensor.", "type": "Functions", "docs": "Returns a tensor containing the shape of the input tensor.\n\n See also `tf.size`, `tf.rank`.\n\n `tf.shape` returns a 1-D integer tensor representing the shape of `input`.\n For a scalar input, the tensor returned has a shape of (0,) and its value is\n the empty vector (i.e. []).\n\n For example:\n\n >>> tf.shape(1.)\n \n\n >>> t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n >>> tf.shape(t)\n \n\n Note: When using symbolic tensors, such as when using the Keras API,\n tf.shape() will return the shape of the symbolic tensor.\n\n >>> a = tf.keras.layers.Input((None, 10))\n >>> tf.shape(a)\n <... shape=(3,) dtype=int32...>\n\n In these cases, using `tf.Tensor.shape` will return more informative results.\n\n >>> a.shape\n TensorShape([None, None, 10])\n\n (The first `None` represents the as yet unknown batch size.)\n\n `tf.shape` and `Tensor.shape` should be identical in eager mode. Within\n `tf.function` or within a `compat.v1` context, not all dimensions may be\n known until execution time. Hence when defining custom layers and models\n for graph mode, prefer the dynamic `tf.shape(x)` over the static `x.shape`.\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n out_type: (Optional) The specified output type of the operation (`int32` or\n `int64`). 
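The dynamic-versus-static distinction drawn for `tf.shape` above, sketched with an unknown batch dimension inside `tf.function`:

```python
import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec(shape=[None, 3])])
def batch_size(x):
    # Static shape: the batch dim is None while tracing.
    print("traced with static shape:", x.shape)
    # Dynamic shape: always defined once real data flows through.
    return tf.shape(x)[0]

print(batch_size(tf.zeros([5, 3])).numpy())  # 5
```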
Defaults to `tf.int32`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `out_type`.\n "}, {"name": "shape_n", "path": "./tf/shape_n.md", "desc": "Returns shape of tensors.", "type": "Functions", "docs": "Returns shape of tensors.\n\n Args:\n input: A list of at least 1 `Tensor` object with the same type.\n out_type: The specified output type of the operation (`int32` or `int64`).\n Defaults to `tf.int32` (optional).\n name: A name for the operation (optional).\n\n Returns:\n A list with the same length as `input` of `Tensor` objects with\n type `out_type`.\n "}, {"name": "sigmoid", "path": "./tf/math/sigmoid.md", "desc": "Computes sigmoid of `x` element-wise.", "type": "Functions", "docs": "Computes sigmoid of `x` element-wise.\n\n Formula for calculating $\\mathrm{sigmoid}(x) = y = 1 / (1 + \\exp(-x))$.\n\n For $x \\in (-\\infty, \\infty)$, $\\mathrm{sigmoid}(x) \\in (0, 1)$.\n\n Example Usage:\n\n If a positive number is large, then its sigmoid will approach 1, since the\n formula will be `y = large / (1 + large)`\n\n >>> x = tf.constant([0.0, 1.0, 50.0, 100.0])\n >>> tf.math.sigmoid(x)\n \n\n If a negative number is large in magnitude, its sigmoid will approach 0, since\n the formula will be `y = 1 / (1 + large)`\n\n >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0])\n >>> tf.math.sigmoid(x)\n \n\n Args:\n x: A Tensor with type `float16`, `float32`, `float64`, `complex64`, or\n `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor with the same type as `x`.\n\n Usage Example:\n\n >>> x = tf.constant([-128.0, 0.0, 128.0], dtype=tf.float32)\n >>> tf.sigmoid(x)\n \n\n @compatibility(scipy)\n Equivalent to scipy.special.expit\n @end_compatibility\n "}, {"name": "sign", "path": "./tf/math/sign.md", "desc": "Returns an element-wise indication of the sign of a number.", "type": "Functions", "docs": "Returns an element-wise indication of the sign of a number.\n\n `y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0`.\n\n For complex numbers, `y = sign(x) = x / |x| if x != 0, otherwise y = 0`.\n\n Example usage:\n\n >>> # real number\n >>> tf.math.sign([0., 2., -3.])\n \n\n >>> # complex number\n >>> tf.math.sign([1 + 1j, 0 + 0j])\n \n\n Args:\n x: A Tensor. Must be one of the following types: bfloat16, half, float32,\n float64, int32, int64, complex64, complex128.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor. Has the same type as x.\n\n If `x` is a `SparseTensor`, returns\n `SparseTensor(x.indices, tf.math.sign(x.values, ...), x.dense_shape)`"}, {"name": "sin", "path": "./tf/math/sin.md", "desc": "Computes sine of x element-wise.", "type": "Functions", "docs": "Computes sine of x element-wise.\n\n Given an input tensor, this function computes sine of every\n element in the tensor. Input range is `(-inf, inf)` and\n output range is `[-1,1]`.\n\n ```python\n x = tf.constant([-float(\"inf\"), -9, -0.5, 1, 1.2, 200, 10, float(\"inf\")])\n tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan]\n ```\n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `x`.\n "}, {"name": "sinh", "path": "./tf/math/sinh.md", "desc": "Computes hyperbolic sine of x element-wise.", "type": "Functions", "docs": "Computes hyperbolic sine of x element-wise.\n\n Given an input tensor, this function computes hyperbolic sine of every\n element in the tensor. Input range is `[-inf,inf]` and output range\n is `[-inf,inf]`.\n\n ```python\n x = tf.constant([-float(\"inf\"), -9, -0.5, 1, 1.2, 2, 10, float(\"inf\")])\n tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf]\n ```\n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n "}, {"name": "size", "path": "./tf/size.md", "desc": "Returns the size of a tensor.", "type": "Functions", "docs": "Returns the size of a tensor.\n\n See also `tf.shape`.\n\n Returns a 0-D `Tensor` representing the number of elements in `input`\n of type `out_type`. Defaults to tf.int32.\n\n For example:\n\n >>> t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n >>> tf.size(t)\n \n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n out_type: (Optional) The specified non-quantized numeric output type of the\n operation. Defaults to `tf.int32`.\n\n Returns:\n A `Tensor` of type `out_type`. Defaults to `tf.int32`.\n\n @compatibility(numpy)\n Equivalent to np.size()\n @end_compatibility\n "}, {"name": "slice", "path": "./tf/slice.md", "desc": "Extracts a slice from a tensor.", "type": "Functions", "docs": "Extracts a slice from a tensor.\n\n See also `tf.strided_slice`.\n\n This operation extracts a slice of size `size` from a tensor `input_` starting\n at the location specified by `begin`. The slice `size` is represented as a\n tensor shape, where `size[i]` is the number of elements of the 'i'th dimension\n of `input_` that you want to slice. The starting location (`begin`) for the\n slice is represented as an offset in each dimension of `input_`. In other\n words, `begin[i]` is the offset into the i'th dimension of `input_` that you\n want to slice from.\n\n Note that `tf.Tensor.__getitem__` is typically a more pythonic way to\n perform slices, as it allows you to write `foo[3:7, :-2]` instead of\n `tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.\n\n `begin` is zero-based; `size` is one-based. If `size[i]` is -1,\n all remaining elements in dimension i are included in the\n slice. In other words, this is equivalent to setting:\n\n `size[i] = input_.dim_size(i) - begin[i]`\n\n This operation requires that:\n\n `0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`\n\n For example:\n\n ```python\n t = tf.constant([[[1, 1, 1], [2, 2, 2]],\n [[3, 3, 3], [4, 4, 4]],\n [[5, 5, 5], [6, 6, 6]]])\n tf.slice(t, [1, 0, 0], [1, 1, 3]) # [[[3, 3, 3]]]\n tf.slice(t, [1, 0, 0], [1, 2, 3]) # [[[3, 3, 3],\n # [4, 4, 4]]]\n tf.slice(t, [1, 0, 0], [2, 1, 3]) # [[[3, 3, 3]],\n # [[5, 5, 5]]]\n ```\n\n Args:\n input_: A `Tensor`.\n begin: An `int32` or `int64` `Tensor`.\n size: An `int32` or `int64` `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` the same type as `input_`.\n "}, {"name": "sort", "path": "./tf/sort.md", "desc": "Sorts a tensor.", "type": "Functions", "docs": "Sorts a tensor.\n\n Usage:\n\n >>> a = [1, 10, 26.9, 2.8, 166.32, 62.3]\n >>> tf.sort(a).numpy()\n array([ 1. , 2.8 , 10. 
, 26.9 , 62.3 , 166.32], dtype=float32)\n\n >>> tf.sort(a, direction='DESCENDING').numpy()\n array([166.32, 62.3 , 26.9 , 10. , 2.8 , 1. ], dtype=float32)\n\n For multidimensional inputs you can control which axis the sort is applied\n along. The default `axis=-1` sorts the innermost axis.\n\n >>> mat = [[3,2,1],\n ... [2,1,3],\n ... [1,3,2]]\n >>> tf.sort(mat, axis=-1).numpy()\n array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]], dtype=int32)\n >>> tf.sort(mat, axis=0).numpy()\n array([[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]], dtype=int32)\n\n See also:\n\n * `tf.argsort`: Like sort, but it returns the sort indices.\n * `tf.math.top_k`: A partial sort that returns a fixed number of top values\n and corresponding indices.\n\n\n Args:\n values: 1-D or higher **numeric** `Tensor`.\n axis: The axis along which to sort. The default is -1, which sorts the last\n axis.\n direction: The direction in which to sort the values (`'ASCENDING'` or\n `'DESCENDING'`).\n name: Optional name for the operation.\n\n Returns:\n A `Tensor` with the same dtype and shape as `values`, with the elements\n sorted along the given `axis`.\n\n Raises:\n tf.errors.InvalidArgumentError: If the `values.dtype` is not a `float` or\n `int` type.\n ValueError: If axis is not a constant scalar, or the direction is invalid.\n "}, {"name": "space_to_batch", "path": "./tf/space_to_batch.md", "desc": "SpaceToBatch for N-D tensors of type T.", "type": "Functions", "docs": "SpaceToBatch for N-D tensors of type T.\n\n This operation divides \"spatial\" dimensions `[1, ..., M]` of the input into a\n grid of blocks of shape `block_shape`, and interleaves these blocks with the\n \"batch\" dimension (0) such that in the output, the spatial dimensions\n `[1, ..., M]` correspond to the position within the grid, and the batch\n dimension combines both the position within a spatial block and the original\n batch position. Prior to division into blocks, the spatial dimensions of the\n input are optionally zero padded according to `paddings`. See below for a\n precise description.\n\n This operation is equivalent to the following steps:\n\n 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the\n input according to `paddings` to produce `padded` of shape `padded_shape`.\n\n 2. Reshape `padded` to `reshaped_padded` of shape:\n\n [batch] +\n [padded_shape[1] / block_shape[0],\n block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1],\n block_shape[M-1]] +\n remaining_shape\n\n 3. Permute dimensions of `reshaped_padded` to produce\n `permuted_reshaped_padded` of shape:\n\n block_shape +\n [batch] +\n [padded_shape[1] / block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1]] +\n remaining_shape\n\n 4. 
Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch\n dimension, producing an output tensor of shape:\n\n [batch * prod(block_shape)] +\n [padded_shape[1] / block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1]] +\n remaining_shape\n\n Some examples:\n\n (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n ```\n x = [[[[1], [2]], [[3], [4]]]]\n ```\n\n The output tensor has shape `[4, 1, 1, 1]` and value:\n\n ```\n [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n ```\n\n (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n ```\n x = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n ```\n\n The output tensor has shape `[4, 1, 1, 3]` and value:\n\n ```\n [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]\n ```\n\n (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n ```\n x = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n ```\n\n The output tensor has shape `[4, 2, 2, 1]` and value:\n\n ```\n x = [[[[1], [3]], [[9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n ```\n\n (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and\n paddings = `[[0, 0], [2, 0]]`:\n\n ```\n x = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]]],\n [[[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n ```\n\n The output tensor has shape `[8, 1, 3, 1]` and value:\n\n ```\n x = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n [[[0], [2], [4]]], [[[0], [10], [12]]],\n [[[0], [5], [7]]], [[[0], [13], [15]]],\n [[[0], [6], [8]]], [[[0], [14], [16]]]]\n ```\n\n Among others, this operation is useful for reducing atrous convolution into\n regular convolution.\n\n Args:\n input: A `Tensor`.\n N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\n where spatial_shape has `M` dimensions.\n block_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n 1-D with shape `[M]`, all values must be >= 1.\n paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n 2-D with shape `[M, 2]`, all values must be >= 0.\n `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension\n `i + 1`, which corresponds to spatial dimension `i`. It is required that\n `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n "}, {"name": "space_to_batch_nd", "path": "./tf/space_to_batch_nd.md", "desc": "SpaceToBatch for N-D tensors of type T.", "type": "Functions", "docs": "SpaceToBatch for N-D tensors of type T.\n\n This operation divides \"spatial\" dimensions `[1, ..., M]` of the input into a\n grid of blocks of shape `block_shape`, and interleaves these blocks with the\n \"batch\" dimension (0) such that in the output, the spatial dimensions\n `[1, ..., M]` correspond to the position within the grid, and the batch\n dimension combines both the position within a spatial block and the original\n batch position. Prior to division into blocks, the spatial dimensions of the\n input are optionally zero padded according to `paddings`. See below for a\n precise description.\n\n This operation is equivalent to the following steps:\n\n 1. 
Zero-pad the start and end of dimensions `[1, ..., M]` of the\n input according to `paddings` to produce `padded` of shape `padded_shape`.\n\n 2. Reshape `padded` to `reshaped_padded` of shape:\n\n [batch] +\n [padded_shape[1] / block_shape[0],\n block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1],\n block_shape[M-1]] +\n remaining_shape\n\n 3. Permute dimensions of `reshaped_padded` to produce\n `permuted_reshaped_padded` of shape:\n\n block_shape +\n [batch] +\n [padded_shape[1] / block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1]] +\n remaining_shape\n\n 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch\n dimension, producing an output tensor of shape:\n\n [batch * prod(block_shape)] +\n [padded_shape[1] / block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1]] +\n remaining_shape\n\n Some examples:\n\n (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n ```\n x = [[[[1], [2]], [[3], [4]]]]\n ```\n\n The output tensor has shape `[4, 1, 1, 1]` and value:\n\n ```\n [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n ```\n\n (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n ```\n x = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n ```\n\n The output tensor has shape `[4, 1, 1, 3]` and value:\n\n ```\n [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]\n ```\n\n (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n ```\n x = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n ```\n\n The output tensor has shape `[4, 2, 2, 1]` and value:\n\n ```\n x = [[[[1], [3]], [[9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n ```\n\n (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and\n paddings = `[[0, 0], [2, 0]]`:\n\n ```\n x = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]]],\n [[[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n ```\n\n The output tensor has shape `[8, 1, 3, 1]` and value:\n\n ```\n x = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n [[[0], [2], [4]]], [[[0], [10], [12]]],\n [[[0], [5], [7]]], [[[0], [13], [15]]],\n [[[0], [6], [8]]], [[[0], [14], [16]]]]\n ```\n\n Among others, this operation is useful for reducing atrous convolution into\n regular convolution.\n\n Args:\n input: A `Tensor`.\n N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\n where spatial_shape has `M` dimensions.\n block_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n 1-D with shape `[M]`, all values must be >= 1.\n paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n 2-D with shape `[M, 2]`, all values must be >= 0.\n `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension\n `i + 1`, which corresponds to spatial dimension `i`. It is required that\n `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
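`tf.space_to_batch` and `tf.batch_to_space` above are inverses when `paddings` and `crops` agree; a round-trip sketch on a single 4x4 one-channel image:

```python
import tensorflow as tf

x = tf.reshape(tf.range(16.0), [1, 4, 4, 1])
y = tf.space_to_batch(x, block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
print(y.shape)  # (4, 2, 2, 1): each 2x2 block moved into the batch dim

z = tf.batch_to_space(y, block_shape=[2, 2], crops=[[0, 0], [0, 0]])
print(bool(tf.reduce_all(z == x)))  # True: the round trip is lossless
```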
Has the same type as `input`.\n "}, {"name": "split", "path": "./tf/split.md", "desc": "Splits a tensor `value` into a list of sub tensors.", "type": "Functions", "docs": "Splits a tensor `value` into a list of sub tensors.\n\n See also `tf.unstack`.\n\n If `num_or_size_splits` is an `int`, then it splits `value` along the\n dimension `axis` into `num_or_size_splits` smaller tensors. This requires that\n `value.shape[axis]` is divisible by `num_or_size_splits`.\n\n If `num_or_size_splits` is a 1-D Tensor (or list), then `value` is split into\n `len(num_or_size_splits)` elements. The shape of the `i`-th\n element has the same size as the `value` except along dimension `axis` where\n the size is `num_or_size_splits[i]`.\n\n For example:\n\n >>> x = tf.Variable(tf.random.uniform([5, 30], -1, 1))\n >>>\n >>> # Split `x` into 3 tensors along dimension 1\n >>> s0, s1, s2 = tf.split(x, num_or_size_splits=3, axis=1)\n >>> tf.shape(s0).numpy()\n array([ 5, 10], dtype=int32)\n >>>\n >>> # Split `x` into 3 tensors with sizes [4, 15, 11] along dimension 1\n >>> split0, split1, split2 = tf.split(x, [4, 15, 11], 1)\n >>> tf.shape(split0).numpy()\n array([5, 4], dtype=int32)\n >>> tf.shape(split1).numpy()\n array([ 5, 15], dtype=int32)\n >>> tf.shape(split2).numpy()\n array([ 5, 11], dtype=int32)\n\n Args:\n value: The `Tensor` to split.\n num_or_size_splits: Either an `int` indicating the number of splits\n along `axis` or a 1-D integer `Tensor` or Python list containing the sizes\n of each output tensor along `axis`. If an `int`, then it must evenly\n divide `value.shape[axis]`; otherwise the sum of sizes along the split\n axis must match that of the `value`.\n axis: An `int` or scalar `int32` `Tensor`. The dimension along which\n to split. Must be in the range `[-rank(value), rank(value))`. 
Defaults to\n 0.\n num: Optional, an `int`, used to specify the number of outputs when it\n cannot be inferred from the shape of `size_splits`.\n name: A name for the operation (optional).\n\n Returns:\n if `num_or_size_splits` is an `int` returns a list of\n `num_or_size_splits` `Tensor` objects; if `num_or_size_splits` is a 1-D\n list or 1-D `Tensor` returns `num_or_size_splits.get_shape[0]`\n `Tensor` objects resulting from splitting `value`.\n\n Raises:\n ValueError: If `num` is unspecified and cannot be inferred.\n ValueError: If `num_or_size_splits` is a scalar `Tensor`.\n "}, {"name": "sqrt", "path": "./tf/math/sqrt.md", "desc": "Computes element-wise square root of the input tensor.", "type": "Functions", "docs": "Computes element-wise square root of the input tensor.\n\n Note: This operation does not support integer types.\n\n >>> x = tf.constant([[4.0], [16.0]])\n >>> tf.sqrt(x)\n \n >>> y = tf.constant([[-4.0], [16.0]])\n >>> tf.sqrt(y)\n \n >>> z = tf.constant([[-1.0], [16.0]], dtype=tf.complex128)\n >>> tf.sqrt(z)\n \n\n Note: In order to support complex type, please provide an input tensor\n of `complex64` or `complex128`.\n\n Args:\n x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,\n `complex64`, `complex128`\n name: A name for the operation (optional).\n\n Returns:\n A `tf.Tensor` of same size, type and sparsity as `x`.\n\n If `x` is a `SparseTensor`, returns\n `SparseTensor(x.indices, tf.math.sqrt(x.values, ...), x.dense_shape)`"}, {"name": "square", "path": "./tf/math/square.md", "desc": "Computes square of x element-wise.", "type": "Functions", "docs": "Computes square of x element-wise.\n\n I.e., \\\\(y = x * x = x^2\\\\).\n\n >>> tf.math.square([-2., 0., 3.])\n \n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n\n If `x` is a `SparseTensor`, returns\n `SparseTensor(x.indices, tf.math.square(x.values, ...), x.dense_shape)`"}, {"name": "squeeze", "path": "./tf/squeeze.md", "desc": "Removes dimensions of size 1 from the shape of a tensor.", "type": "Functions", "docs": "Removes dimensions of size 1 from the shape of a tensor.\n\n Given a tensor `input`, this operation returns a tensor of the same type with\n all dimensions of size 1 removed. If you don't want to remove all size 1\n dimensions, you can remove specific size 1 dimensions by specifying\n `axis`.\n\n For example:\n\n ```python\n # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\n tf.shape(tf.squeeze(t)) # [2, 3]\n ```\n\n Or, to remove specific size 1 dimensions:\n\n ```python\n # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\n tf.shape(tf.squeeze(t, [2, 4])) # [1, 2, 3, 1]\n ```\n\n Unlike the older op `tf.compat.v1.squeeze`, this op does not accept a\n deprecated `squeeze_dims` argument.\n\n Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)`\n time, where `N` is the number of elements in the squeezed dimensions.\n\n Args:\n input: A `Tensor`. The `input` to squeeze.\n axis: An optional list of `ints`. Defaults to `[]`. If specified, only\n squeezes the dimensions listed. The dimension index starts at 0. It is an\n error to squeeze a dimension that is not 1. Must be in the range\n `[-rank(input), rank(input))`. 
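As the `tf.sqrt` notes above state, negative real inputs produce NaN, while a complex dtype yields complex roots; a two-line sketch:

```python
import tensorflow as tf

y = tf.constant([[-4.0], [16.0]])
print(tf.sqrt(y).numpy())                         # [[nan], [4.]]
print(tf.sqrt(tf.cast(y, tf.complex64)).numpy())  # [[0.+2.j], [4.+0.j]]
```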
Must be specified if `input` is a\n `RaggedTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n Contains the same data as `input`, but has one or more dimensions of\n size 1 removed.\n\n Raises:\n ValueError: The input cannot be converted to a tensor, or the specified\n axis cannot be squeezed.\n "}, {"name": "stack", "path": "./tf/stack.md", "desc": "Stacks a list of rank-`R` tensors into one rank-`(R+1", "type": "Functions", "docs": "Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.\n\n See also `tf.concat`, `tf.tile`, `tf.repeat`.\n\n Packs the list of tensors in `values` into a tensor with rank one higher than\n each tensor in `values`, by packing them along the `axis` dimension.\n Given a list of length `N` of tensors of shape `(A, B, C)`;\n\n if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.\n if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.\n Etc.\n\n For example:\n\n >>> x = tf.constant([1, 4])\n >>> y = tf.constant([2, 5])\n >>> z = tf.constant([3, 6])\n >>> tf.stack([x, y, z])\n \n >>> tf.stack([x, y, z], axis=1)\n \n\n This is the opposite of unstack. The numpy equivalent is `np.stack`\n\n >>> np.array_equal(np.stack([x, y, z]), tf.stack([x, y, z]))\n True\n\n Args:\n values: A list of `Tensor` objects with the same shape and type.\n axis: An `int`. The axis to stack along. Defaults to the first dimension.\n Negative values wrap around, so the valid range is `[-(R+1), R+1)`.\n name: A name for this operation (optional).\n\n Returns:\n output: A stacked `Tensor` with the same type as `values`.\n\n Raises:\n ValueError: If `axis` is out of the range [-(R+1), R+1).\n "}, {"name": "stop_gradient", "path": "./tf/stop_gradient.md", "desc": "Stops gradient computation.", "type": "Functions", "docs": "Stops gradient computation.\n\n When executed in a graph, this op outputs its input tensor as-is.\n\n When building ops to compute gradients, this op prevents the contribution of\n its inputs to be taken into account. Normally, the gradient generator adds ops\n to a graph to compute the derivatives of a specified 'loss' by recursively\n finding out inputs that contributed to its computation. If you insert this op\n in the graph it inputs are masked from the gradient generator. They are not\n taken into account for computing gradients.\n\n This is useful any time you want to compute a value with TensorFlow but need\n to pretend that the value was a constant. For example, the softmax function\n for a vector x can be written as\n\n ```python\n\n def softmax(x):\n numerator = tf.exp(x)\n denominator = tf.reduce_sum(numerator)\n return numerator / denominator\n ```\n\n This however is susceptible to overflow if the values in x are large. An\n alternative more stable way is to subtract the maximum of x from each of the\n values.\n\n ```python\n\n def stable_softmax(x):\n z = x - tf.reduce_max(x)\n numerator = tf.exp(z)\n denominator = tf.reduce_sum(numerator)\n return numerator / denominator\n ```\n\n However, when we backprop through the softmax to x, we dont want to backprop\n through the `tf.reduce_max(x)` (if the max values are not unique then the\n gradient could flow to the wrong input) calculation and treat that as a\n constant. 
Therefore, we should write this out as\n\n ```python\n\n def stable_softmax(x):\n z = x - tf.stop_gradient(tf.reduce_max(x))\n numerator = tf.exp(z)\n denominator = tf.reduce_sum(numerator)\n return numerator / denominator\n ```\n\n Some other examples include:\n\n * The *EM* algorithm where the *M-step* should not involve backpropagation\n through the output of the *E-step*.\n * Contrastive divergence training of Boltzmann machines where, when\n differentiating the energy function, the training must not backpropagate\n through the graph that generated the samples from the model.\n * Adversarial training, where no backprop should happen through the adversarial\n example generation process.\n\n Args:\n input: A `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n "}, {"name": "strided_slice", "path": "./tf/strided_slice.md", "desc": "Extracts a strided slice of a tensor (generalized Python array indexing", "type": "Functions", "docs": "Extracts a strided slice of a tensor (generalized Python array indexing).\n\n See also `tf.slice`.\n\n **Instead of calling this op directly most users will want to use the\n NumPy-style slicing syntax (e.g. `tensor[..., 3:4:-1, tf.newaxis, 3]`), which\n is supported via `tf.Tensor.__getitem__` and `tf.Variable.__getitem__`.**\n The interface of this op is a low-level encoding of the slicing syntax.\n\n Roughly speaking, this op extracts a slice of size `(end-begin)/stride`\n from the given `input_` tensor. Starting at the location specified by `begin`\n the slice continues by adding `stride` to the index until all dimensions are\n not less than `end`.\n Note that a stride can be negative, which causes a reverse slice.\n\n Given a Python slice `input[spec0, spec1, ..., specn]`,\n this function will be called as follows.\n\n `begin`, `end`, and `strides` will be vectors of length n.\n n in general is not equal to the rank of the `input_` tensor.\n\n In each mask field (`begin_mask`, `end_mask`, `ellipsis_mask`,\n `new_axis_mask`, `shrink_axis_mask`) the ith bit will correspond to\n the ith spec.\n\n If the ith bit of `begin_mask` is set, `begin[i]` is ignored and\n the fullest possible range in that dimension is used instead.\n `end_mask` works analogously, except with the end range.\n\n `foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.\n `foo[::-1]` reverses a tensor with shape 8.\n\n If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions\n as needed will be inserted between other dimensions. Only one\n non-zero bit is allowed in `ellipsis_mask`.\n\n For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is\n equivalent to `foo[3:5,:,:,4:5]` and\n `foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.\n\n If the ith bit of `new_axis_mask` is set, then `begin`,\n `end`, and `stride` are ignored and a new length 1 dimension is\n added at this point in the output tensor.\n\n For example,\n `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.\n\n If the ith bit of `shrink_axis_mask` is set, it implies that the ith\n specification shrinks the dimensionality by 1, taking on the value at index\n `begin[i]`. `end[i]` and `strides[i]` are ignored in this case. 
For example in\n Python one might do `foo[:, 3, :]` which would result in `shrink_axis_mask`\n equal to 2.\n\n\n NOTE: `begin` and `end` are zero-indexed.\n `strides` entries must be non-zero.\n\n\n ```python\n t = tf.constant([[[1, 1, 1], [2, 2, 2]],\n [[3, 3, 3], [4, 4, 4]],\n [[5, 5, 5], [6, 6, 6]]])\n tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1]) # [[[3, 3, 3]]]\n tf.strided_slice(t, [1, 0, 0], [2, 2, 3], [1, 1, 1]) # [[[3, 3, 3],\n # [4, 4, 4]]]\n tf.strided_slice(t, [1, -1, 0], [2, -3, 3], [1, -1, 1]) # [[[4, 4, 4],\n # [3, 3, 3]]]\n ```\n\n Args:\n input_: A `Tensor`.\n begin: An `int32` or `int64` `Tensor`.\n end: An `int32` or `int64` `Tensor`.\n strides: An `int32` or `int64` `Tensor`.\n begin_mask: An `int32` mask.\n end_mask: An `int32` mask.\n ellipsis_mask: An `int32` mask.\n new_axis_mask: An `int32` mask.\n shrink_axis_mask: An `int32` mask.\n var: The variable corresponding to `input_` or None\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` the same type as `input`.\n "}, {"name": "subtract", "path": "./tf/math/subtract.md", "desc": "Returns x - y element-wise.", "type": "Functions", "docs": "Returns x - y element-wise.\n\n *NOTE*: `tf.subtract` supports broadcasting. More about broadcasting\n [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\n Both input and output have a range `(-inf, inf)`.\n\n Example usages are shown below.\n\n Subtract operation between an array and a scalar:\n\n >>> x = [1, 2, 3, 4, 5]\n >>> y = 1\n >>> tf.subtract(x, y)\n <tf.Tensor: shape=(5,), dtype=int32, numpy=array([0, 1, 2, 3, 4], dtype=int32)>\n >>> tf.subtract(y, x)\n <tf.Tensor: shape=(5,), dtype=int32, numpy=array([ 0, -1, -2, -3, -4], dtype=int32)>\n\n Note that the binary `-` operator can be used instead:\n\n >>> x = tf.convert_to_tensor([1, 2, 3, 4, 5])\n >>> y = tf.convert_to_tensor(1)\n >>> x - y\n <tf.Tensor: shape=(5,), dtype=int32, numpy=array([0, 1, 2, 3, 4], dtype=int32)>\n\n Subtract operation between an array and a tensor of same shape:\n\n >>> x = [1, 2, 3, 4, 5]\n >>> y = tf.constant([5, 4, 3, 2, 1])\n >>> tf.subtract(y, x)\n <tf.Tensor: shape=(5,), dtype=int32, numpy=array([ 4,  2,  0, -2, -4], dtype=int32)>\n\n **Warning**: If one of the inputs (`x` or `y`) is a tensor and the other is a\n non-tensor, the non-tensor input will adopt (or get cast to) the data type\n of the tensor input. This can potentially cause unwanted overflow or underflow\n conversion.\n\n For example,\n\n >>> x = tf.constant([1, 2], dtype=tf.int8)\n >>> y = [2**8 + 1, 2**8 + 2]\n >>> tf.subtract(x, y)\n <tf.Tensor: shape=(2,), dtype=int8, numpy=array([0, 0], dtype=int8)>\n\n When subtracting two input values of different shapes, `tf.subtract` follows the\n [general broadcasting rules](https://numpy.org/doc/stable/user/basics.broadcasting.html#general-broadcasting-rules)\n . The two input array shapes are compared element-wise. Starting with the\n trailing dimensions, the two dimensions either have to be equal or one of them\n needs to be `1`.\n\n For example,\n\n >>> x = np.ones(6).reshape(2, 3, 1)\n >>> y = np.ones(6).reshape(2, 1, 3)\n >>> tf.subtract(x, y)\n <tf.Tensor: shape=(2, 3, 3), dtype=float64, numpy=\n array([[[0., 0., 0.],\n [0., 0., 0.],\n [0., 0., 0.]],\n [[0., 0., 0.],\n [0., 0., 0.],\n [0., 0., 0.]]])>\n\n Example with inputs of different dimensions:\n\n >>> x = np.ones(6).reshape(2, 3, 1)\n >>> y = np.ones(6).reshape(1, 6)\n >>> tf.subtract(x, y)\n <tf.Tensor: shape=(2, 3, 6), dtype=float64, numpy=\n array([[[0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0.]],\n [[0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0.]]])>\n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`, `uint32`, `uint64`.\n y: A `Tensor`. Must have the same type as `x`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n "}, {"name": "switch_case", "path": "./tf/switch_case.md", "desc": "Create a switch/case operation, i.e. an integer-indexed conditional.", "type": "Functions", "docs": "Create a switch/case operation, i.e. 
an integer-indexed conditional.\n\n See also `tf.case`.\n\n This op can be substantially more efficient than `tf.case` when exactly one\n branch will be selected. `tf.switch_case` is more like a C++ switch/case\n statement than `tf.case`, which is more like an if/elif/elif/else chain.\n\n The `branch_fns` parameter is either a dict from `int` to callables, or list\n of (`int`, callable) pairs, or simply a list of callables (in which case the\n index is implicitly the key). The `branch_index` `Tensor` is used to select an\n element in `branch_fns` with matching `int` key, falling back to `default`\n if none match, or `max(keys)` if no `default` is provided. The keys must form\n a contiguous set from `0` to `len(branch_fns) - 1`.\n\n `tf.switch_case` supports nested structures as implemented in `tf.nest`. All\n callables must return the same (possibly nested) value structure of lists,\n tuples, and/or named tuples.\n\n **Example:**\n\n Pseudocode:\n\n ```c++\n switch (branch_index) { // c-style switch\n case 0: return 17;\n case 1: return 31;\n default: return -1;\n }\n ```\n or\n ```python\n branches = {0: lambda: 17, 1: lambda: 31}\n branches.get(branch_index, lambda: -1)()\n ```\n\n Expressions:\n\n ```python\n def f1(): return tf.constant(17)\n def f2(): return tf.constant(31)\n def f3(): return tf.constant(-1)\n r = tf.switch_case(branch_index, branch_fns={0: f1, 1: f2}, default=f3)\n # Equivalent: tf.switch_case(branch_index, branch_fns={0: f1, 1: f2, 2: f3})\n ```\n\n Args:\n branch_index: An int Tensor specifying which of `branch_fns` should be\n executed.\n branch_fns: A `dict` mapping `int`s to callables, or a `list` of\n (`int`, callable) pairs, or simply a list of callables (in which case the\n index serves as the key). Each callable must return a matching structure\n of tensors.\n default: Optional callable that returns a structure of tensors.\n name: A name for this operation (optional).\n\n Returns:\n The tensors returned by the callable identified by `branch_index`, or those\n returned by `default` if no key matches and `default` was provided, or those\n returned by the max-keyed `branch_fn` if no `default` is provided.\n\n Raises:\n TypeError: If `branch_fns` is not a list/dictionary.\n TypeError: If `branch_fns` is a list but does not contain 2-tuples or\n callables.\n TypeError: If `fns[i]` is not callable for any i, or `default` is not\n callable.\n "}, {"name": "tan", "path": "./tf/math/tan.md", "desc": "Computes tan of x element-wise.", "type": "Functions", "docs": "Computes tan of x element-wise.\n\n Given an input tensor, this function computes tangent of every\n element in the tensor. Input range is `(-inf, inf)` and\n output range is `(-inf, inf)`. If input lies outside the boundary, `nan`\n is returned.\n\n ```python\n x = tf.constant([-float(\"inf\"), -9, -0.5, 1, 1.2, 200, 10000, float(\"inf\")])\n tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan]\n ```\n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n "}, {"name": "tanh", "path": "./tf/math/tanh.md", "desc": "Computes hyperbolic tangent of `x` element-wise.", "type": "Functions", "docs": "Computes hyperbolic tangent of `x` element-wise.\n\n Given an input tensor, this function computes hyperbolic tangent of every\n element in the tensor. 
Input range is `[-inf, inf]` and\n output range is `[-1,1]`.\n\n >>> x = tf.constant([-float(\"inf\"), -5, -0.5, 1, 1.2, 2, 3, float(\"inf\")])\n >>> tf.math.tanh(x)\n <tf.Tensor: shape=(8,), dtype=float32, numpy=\n array([-1. , -0.99990916, -0.46211717, 0.7615942 , 0.8336547 ,\n 0.9640276 , 0.9950547 , 1. ], dtype=float32)>\n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n\n If `x` is a `SparseTensor`, returns\n `SparseTensor(x.indices, tf.math.tanh(x.values, ...), x.dense_shape)`"}, {"name": "tensor_scatter_nd_add", "path": "./tf/tensor_scatter_nd_add.md", "desc": "Adds sparse `updates` to an existing tensor according to `indices`.", "type": "Functions", "docs": "Adds sparse `updates` to an existing tensor according to `indices`.\n\n This operation creates a new tensor by adding sparse `updates` to the passed\n in `tensor`.\n This operation is very similar to `tf.compat.v1.scatter_nd_add`, except that the\n updates are added onto an existing tensor (as opposed to a variable). If the\n memory for the existing tensor cannot be re-used, a copy is made and updated.\n\n `indices` is an integer tensor containing indices into a new tensor of shape\n `tensor.shape`. The last dimension of `indices` can be at most the rank of\n `tensor.shape`:\n\n ```\n indices.shape[-1] <= tensor.shape.rank\n ```\n\n The last dimension of `indices` corresponds to indices into elements\n (if `indices.shape[-1] = tensor.shape.rank`) or slices\n (if `indices.shape[-1] < tensor.shape.rank`) along dimension\n `indices.shape[-1]` of `tensor.shape`. `updates` is a tensor with shape\n\n ```\n indices.shape[:-1] + tensor.shape[indices.shape[-1]:]\n ```\n\n The simplest form of `tensor_scatter_nd_add` is to add individual elements to a\n tensor by index. For example, say we want to add 4 elements in a rank-1\n tensor with 8 elements.\n\n In Python, this scatter add operation would look like this:\n\n >>> indices = tf.constant([[4], [3], [1], [7]])\n >>> updates = tf.constant([9, 10, 11, 12])\n >>> tensor = tf.ones([8], dtype=tf.int32)\n >>> updated = tf.tensor_scatter_nd_add(tensor, indices, updates)\n >>> updated\n <tf.Tensor: shape=(8,), dtype=int32, numpy=array([ 1, 12,  1, 11, 10,  1,  1, 13], dtype=int32)>\n\n We can also insert entire slices of a higher-rank tensor all at once. For\n example, suppose we want to insert two slices in the first dimension of a\n rank-3 tensor with two matrices of new values.\n\n In Python, this scatter add operation would look like this:\n\n >>> indices = tf.constant([[0], [2]])\n >>> updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],\n ... [7, 7, 7, 7], [8, 8, 8, 8]],\n ... [[5, 5, 5, 5], [6, 6, 6, 6],\n ... [7, 7, 7, 7], [8, 8, 8, 8]]])\n >>> tensor = tf.ones([4, 4, 4], dtype=tf.int32)\n >>> updated = tf.tensor_scatter_nd_add(tensor, indices, updates)\n >>> updated\n <tf.Tensor: shape=(4, 4, 4), dtype=int32, numpy=\n array([[[6, 6, 6, 6],\n [7, 7, 7, 7],\n [8, 8, 8, 8],\n [9, 9, 9, 9]],\n [[1, 1, 1, 1],\n [1, 1, 1, 1],\n [1, 1, 1, 1],\n [1, 1, 1, 1]],\n [[6, 6, 6, 6],\n [7, 7, 7, 7],\n [8, 8, 8, 8],\n [9, 9, 9, 9]],\n [[1, 1, 1, 1],\n [1, 1, 1, 1],\n [1, 1, 1, 1],\n [1, 1, 1, 1]]], dtype=int32)>\n\n Note: on CPU, if an out of bound index is found, an error is returned.\n On GPU, if an out of bound index is found, the index is ignored.\n\n Args:\n tensor: A `Tensor`. Tensor to copy/update.\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Index tensor.\n updates: A `Tensor`. Must have the same type as `tensor`.\n Updates to scatter into output.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `tensor`.\n "}, {"name": "tensor_scatter_nd_max", "path": "./tf/tensor_scatter_nd_max.md", "desc": "Apply a sparse update to a tensor taking the element-wise maximum.", "type": "Functions", "docs": "Apply a sparse update to a tensor taking the element-wise maximum.\n\n Returns a new tensor copied from `tensor` whose values are the element-wise maximum between\n `tensor` and `updates` according to the indices.\n\n >>> tensor = [0, 0, 0, 0, 0, 0, 0, 0]\n >>> indices = [[1], [4], [5]]\n >>> updates = [1, -1, 1]\n >>> tf.tensor_scatter_nd_max(tensor, indices, updates).numpy()\n array([0, 1, 0, 0, 0, 1, 0, 0], dtype=int32)\n\n Refer to `tf.tensor_scatter_nd_update` for more details.\n\n Args:\n tensor: A `Tensor`. Tensor to update.\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Index tensor.\n updates: A `Tensor`. Must have the same type as `tensor`.\n Updates to scatter into output.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `tensor`.\n "}, {"name": "tensor_scatter_nd_min", "path": "./tf/tensor_scatter_nd_min.md", "desc": "Apply a sparse update to a tensor taking the element-wise minimum.", "type": "Functions", "docs": "Apply a sparse update to a tensor taking the element-wise minimum.\n\n Returns a new tensor copied from `tensor` whose values are the element-wise minimum between\n `tensor` and `updates` according to the indices.\n\n >>> tensor = [0, 0, 0, 0, 0, 0, 0, 0]\n >>> indices = [[1], [4], [5]]\n >>> updates = [1, -1, 1]\n >>> tf.tensor_scatter_nd_min(tensor, indices, updates).numpy()\n array([ 0,  0,  0,  0, -1,  0,  0,  0], dtype=int32)\n\n Refer to `tf.tensor_scatter_nd_update` for more details.\n\n Args:\n tensor: A `Tensor`. Tensor to update.\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Index tensor.\n updates: A `Tensor`. Must have the same type as `tensor`.\n Updates to scatter into output.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `tensor`.\n "}, {"name": "tensor_scatter_nd_sub", "path": "./tf/tensor_scatter_nd_sub.md", "desc": "Subtracts sparse `updates` from an existing tensor according to `indices`.", "type": "Functions", "docs": "Subtracts sparse `updates` from an existing tensor according to `indices`.\n\n This operation creates a new tensor by subtracting sparse `updates` from the\n passed in `tensor`.\n This operation is very similar to `tf.scatter_nd_sub`, except that the updates\n are subtracted from an existing tensor (as opposed to a variable). If the memory\n for the existing tensor cannot be re-used, a copy is made and updated.\n\n `indices` is an integer tensor containing indices into a new tensor of shape\n `shape`. The last dimension of `indices` can be at most the rank of `shape`:\n\n indices.shape[-1] <= shape.rank\n\n The last dimension of `indices` corresponds to indices into elements\n (if `indices.shape[-1] = shape.rank`) or slices\n (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of\n `shape`. `updates` is a tensor with shape\n\n indices.shape[:-1] + shape[indices.shape[-1]:]\n\n The simplest form of `tensor_scatter_nd_sub` is to subtract individual elements\n from a tensor by index. For example, say we want to subtract 4 scattered elements\n from a rank-1 tensor with 8 elements.\n\n In Python, this scatter subtract operation would look like this:\n\n ```python\n indices = tf.constant([[4], [3], [1], [7]])\n updates = tf.constant([9, 10, 11, 12])\n tensor = tf.ones([8], dtype=tf.int32)\n updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)\n print(updated)\n ```\n\n The resulting tensor would look like this:\n\n [1, -10, 1, -9, -8, 1, 1, -11]\n\n 
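Since subtracting `updates` is the same as adding their negation, the result can be sanity-checked against `tf.tensor_scatter_nd_add` (a minimal doctest sketch, not part of the op's contract):\n\n >>> t = tf.ones([8], dtype=tf.int32)\n >>> i = tf.constant([[4], [3], [1], [7]])\n >>> u = tf.constant([9, 10, 11, 12])\n >>> a = tf.tensor_scatter_nd_sub(t, i, u)\n >>> b = tf.tensor_scatter_nd_add(t, i, -u)\n >>> bool(tf.reduce_all(a == b))\n True\n\n 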
We can also insert entire slices of a higher-rank tensor all at once. For\n example, suppose we want to insert two slices in the first dimension of a\n rank-3 tensor with two matrices of new values.\n\n In Python, this scatter subtract operation would look like this:\n\n ```python\n indices = tf.constant([[0], [2]])\n updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]],\n [[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]]])\n tensor = tf.ones([4, 4, 4], dtype=tf.int32)\n updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)\n print(updated)\n ```\n\n The resulting tensor would look like this:\n\n [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],\n [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],\n [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],\n [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]\n\n Note that on CPU, if an out of bound index is found, an error is returned.\n On GPU, if an out of bound index is found, the index is ignored.\n\n Args:\n tensor: A `Tensor`. Tensor to copy/update.\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Index tensor.\n updates: A `Tensor`. Must have the same type as `tensor`.\n Updates to scatter into output.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `tensor`.\n "}, {"name": "tensor_scatter_nd_update", "path": "./tf/tensor_scatter_nd_update.md", "desc": "Scatter `updates` into an existing tensor according to `indices`.", "type": "Functions", "docs": "Scatter `updates` into an existing tensor according to `indices`.\n\n This operation creates a new tensor by applying sparse `updates` to the\n input `tensor`. This is similar to an index assignment.\n\n ```\n # Not implemented: tensors cannot be updated inplace.\n tensor[indices] = updates\n ```\n\n If an out of bound index is found on CPU, an error is returned.\n\n > **WARNING**: There are some GPU specific semantics for this operation.\n >\n > - If an out of bound index is found, the index is ignored.\n > - The order in which updates are applied is nondeterministic, so the output\n > will be nondeterministic if `indices` contains duplicates.\n\n This operation is very similar to `tf.scatter_nd`, except that the updates are\n scattered onto an existing tensor (as opposed to a zero-tensor).\n\n 
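For instance (a minimal sketch; the values are arbitrary):\n\n >>> t = tf.constant([10, 20, 30, 40])\n >>> tf.tensor_scatter_nd_update(t, [[1], [3]], [5, 7]).numpy()\n array([10,  5, 30,  7], dtype=int32)\n\n 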
If the memory for the existing tensor cannot be re-used, a copy is made and\n updated.\n\n In general:\n\n * `indices` is an integer tensor - the indices to update in `tensor`.\n * `indices` has **at least two** axes, the last axis is the depth of the\n index vectors.\n * For each index vector in `indices` there is a corresponding entry in\n `updates`.\n * If the length of the index vectors matches the rank of the `tensor`, then\n the index vectors each point to scalars in `tensor` and each update is a\n scalar.\n * If the length of the index vectors is less than the rank of `tensor`, then\n the index vectors each point to slices of `tensor` and shape of the updates\n must match that slice.\n\n Overall this leads to the following shape constraints:\n\n ```\n assert tf.rank(indices) >= 2\n index_depth = indices.shape[-1]\n batch_shape = indices.shape[:-1]\n assert index_depth <= tf.rank(tensor)\n outer_shape = tensor.shape[:index_depth]\n inner_shape = tensor.shape[index_depth:]\n assert updates.shape == batch_shape + inner_shape\n ```\n\n Typical usage is often much simpler than this general form, and it\n can be better understood starting with simple examples:\n\n ### Scalar updates\n\n The simplest usage inserts scalar elements into a tensor by index.\n In this case, the `index_depth` must equal the rank of the\n input `tensor`, since each column of `indices` is an index into an axis of the\n input `tensor`.\n\n In this simplest case the shape constraints are:\n\n ```\n num_updates, index_depth = indices.shape.as_list()\n assert updates.shape == [num_updates]\n assert index_depth == tf.rank(tensor)\n ```\n\n For example, consider inserting 4 scattered elements in a rank-1 tensor with\n 8 elements.\n
\n\n This scatter operation would look like this:\n\n >>> tensor = [0, 0, 0, 0, 0, 0, 0, 0] # tf.rank(tensor) == 1\n >>> indices = [[1], [3], [4], [7]] # num_updates == 4, index_depth == 1\n >>> updates = [9, 10, 11, 12] # num_updates == 4\n >>> print(tf.tensor_scatter_nd_update(tensor, indices, updates))\n tf.Tensor([ 0  9  0 10 11  0  0 12], shape=(8,), dtype=int32)\n\n The length (first axis) of `updates` must equal the length of the `indices`:\n `num_updates`. This is the number of updates being inserted. Each scalar\n update is inserted into `tensor` at the indexed location.\n\n For a higher rank input `tensor` scalar updates can be inserted by using an\n `index_depth` that matches `tf.rank(tensor)`:\n\n >>> tensor = [[1, 1], [1, 1], [1, 1]] # tf.rank(tensor) == 2\n >>> indices = [[0, 1], [2, 0]] # num_updates == 2, index_depth == 2\n >>> updates = [5, 10] # num_updates == 2\n >>> print(tf.tensor_scatter_nd_update(tensor, indices, updates))\n tf.Tensor(\n [[ 1  5]\n [ 1  1]\n [10  1]], shape=(3, 2), dtype=int32)\n\n ### Slice updates\n\n When the input `tensor` has more than one axis scatter can be used to update\n entire slices.\n\n In this case it's helpful to think of the input `tensor` as being a two level\n array-of-arrays. The shape of this two level array is split into the\n `outer_shape` and the `inner_shape`.\n\n `indices` indexes into the outer level of the input tensor (`outer_shape`)\n and replaces the sub-array at that location with the corresponding item from\n the `updates` list. The shape of each update is `inner_shape`.\n\n When updating a list of slices the shape constraints are:\n\n ```\n num_updates, index_depth = indices.shape.as_list()\n outer_shape = tensor.shape[:index_depth]\n inner_shape = tensor.shape[index_depth:]\n assert updates.shape == [num_updates] + inner_shape\n ```\n\n For example, to update rows of a `(6, 3)` `tensor`:\n\n >>> tensor = tf.zeros([6, 3], dtype=tf.int32)\n\n Use an index depth of one.\n\n >>> indices = tf.constant([[2], [4]]) # num_updates == 2, index_depth == 1\n >>> num_updates, index_depth = indices.shape.as_list()\n\n The `outer_shape` is `6`, the inner shape is `3`:\n\n >>> outer_shape = tensor.shape[:index_depth]\n >>> inner_shape = tensor.shape[index_depth:]\n\n 2 rows are being indexed so 2 `updates` must be supplied.\n Each update must be shaped to match the `inner_shape`.\n\n >>> # num_updates == 2, inner_shape==3\n >>> updates = tf.constant([[1, 2, 3],\n ... 
[4, 5, 6]])\n\n Altogether this gives:\n\n >>> tf.tensor_scatter_nd_update(tensor, indices, updates).numpy()\n array([[0, 0, 0],\n [0, 0, 0],\n [1, 2, 3],\n [0, 0, 0],\n [4, 5, 6],\n [0, 0, 0]], dtype=int32)\n\n #### More slice update examples\n\n A tensor representing a batch of uniformly sized video clips naturally has 5\n axes: `[batch_size, time, width, height, channels]`.\n\n For example:\n\n >>> batch_size, time, width, height, channels = 13,11,7,5,3\n >>> video_batch = tf.zeros([batch_size, time, width, height, channels])\n\n To replace a selection of video clips:\n * Use an `index_depth` of 1 (indexing the `outer_shape`: `[batch_size]`)\n * Provide updates each with a shape matching the `inner_shape`:\n `[time, width, height, channels]`.\n\n To replace the first two clips with ones:\n\n >>> indices = [[0],[1]]\n >>> new_clips = tf.ones([2, time, width, height, channels])\n >>> tf.tensor_scatter_nd_update(video_batch, indices, new_clips)\n\n To replace a selection of frames in the videos:\n\n * `indices` must have an `index_depth` of 2 for the `outer_shape`:\n `[batch_size, time]`.\n * `updates` must be shaped like a list of images. Each update must have a\n shape, matching the `inner_shape`: `[width, height, channels]`.\n\n To replace the first frame of the first three video clips:\n\n >>> indices = [[0, 0], [1, 0], [2, 0]] # num_updates=3, index_depth=2\n >>> new_images = tf.ones([\n ... # num_updates=3, inner_shape=(width, height, channels)\n ... 3, width, height, channels])\n >>> tf.tensor_scatter_nd_update(video_batch, indices, new_images)\n\n ### Folded indices\n\n In simple cases it's convenient to think of `indices` and `updates` as\n lists, but this is not a strict requirement. Instead of a flat `num_updates`,\n the `indices` and `updates` can be folded into a `batch_shape`. This\n `batch_shape` is all axes of the `indices`, except for the innermost\n `index_depth` axis.\n\n ```\n index_depth = indices.shape[-1]\n batch_shape = indices.shape[:-1]\n ```\n\n Note: The one exception is that the `batch_shape` cannot be `[]`. You can't\n update a single index by passing indices with shape `[index_depth]`.\n\n `updates` must have a matching `batch_shape` (the axes before `inner_shape`).\n\n ```\n assert updates.shape == batch_shape + inner_shape\n ```\n\n Note: The result is equivalent to flattening the `batch_shape` axes of\n `indices` and `updates`. This generalization just avoids the need\n for reshapes when it is more natural to construct \"folded\" indices and\n updates.\n\n With this generalization the full shape constraints are:\n\n ```\n assert tf.rank(indices) >= 2\n index_depth = indices.shape[-1]\n batch_shape = indices.shape[:-1]\n assert index_depth <= tf.rank(tensor)\n outer_shape = tensor.shape[:index_depth]\n inner_shape = tensor.shape[index_depth:]\n assert updates.shape == batch_shape + inner_shape\n ```\n\n For example, to draw an `X` on a `(5,5)` matrix start with these indices:\n\n >>> tensor = tf.zeros([5,5])\n >>> indices = tf.constant([\n ... [[0,0],\n ... [1,1],\n ... [2,2],\n ... [3,3],\n ... [4,4]],\n ... [[0,4],\n ... [1,3],\n ... [2,2],\n ... [3,1],\n ... [4,0]],\n ... 
])\n >>> indices.shape.as_list() # batch_shape == [2, 5], index_depth == 2\n [2, 5, 2]\n\n Here the `indices` do not have a shape of `[num_updates, index_depth]`, but a\n shape of `batch_shape+[index_depth]`.\n\n Since the `index_depth` is equal to the rank of `tensor`:\n\n * `outer_shape` is `(5,5)`\n * `inner_shape` is `()` - each update is scalar\n * `updates.shape` is `batch_shape + inner_shape == (5,2) + ()`\n\n >>> updates = [\n ... [1,1,1,1,1],\n ... [1,1,1,1,1],\n ... ]\n\n Putting this together gives:\n\n >>> tf.tensor_scatter_nd_update(tensor, indices, updates).numpy()\n array([[1., 0., 0., 0., 1.],\n [0., 1., 0., 1., 0.],\n [0., 0., 1., 0., 0.],\n [0., 1., 0., 1., 0.],\n [1., 0., 0., 0., 1.]], dtype=float32)\n\n Args:\n tensor: Tensor to copy/update.\n indices: Indices to update.\n updates: Updates to apply at the indices.\n name: Optional name for the operation.\n\n Returns:\n A new tensor with the given shape and updates applied according to the\n indices.\n "}, {"name": "tensordot", "path": "./tf/tensordot.md", "desc": "Tensor contraction of a and b along specified axes and outer product.", "type": "Functions", "docs": "Tensor contraction of a and b along specified axes and outer product.\n\n Tensordot (also known as tensor contraction) sums the product of elements\n from `a` and `b` over the indices specified by `axes`.\n\n This operation corresponds to `numpy.tensordot(a, b, axes)`.\n\n Example 1: When `a` and `b` are matrices (order 2), the case `axes=1`\n is equivalent to matrix multiplication.\n\n Example 2: When `a` and `b` are matrices (order 2), the case\n `axes = [[1], [0]]` is equivalent to matrix multiplication.\n\n Example 3: When `a` and `b` are matrices (order 2), the case `axes=0` gives\n the outer product, a tensor of order 4.\n\n Example 4: Suppose that \\\\(a_{ijk}\\\\) and \\\\(b_{lmn}\\\\) represent two\n tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor\n \\\\(c_{jklm}\\\\) whose entry\n corresponding to the indices \\\\((j,k,l,m)\\\\) is given by:\n\n \\\\( c_{jklm} = \\sum_i a_{ijk} b_{lmi} \\\\).\n\n In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.\n\n Args:\n a: `Tensor` of type `float32` or `float64`.\n b: `Tensor` with the same type as `a`.\n axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].\n If axes is a scalar, sum over the last N axes of a and the first N axes of\n b in order. If axes is a list or `Tensor` the first and second row contain\n the set of unique integers specifying axes along which the contraction is\n computed, for `a` and `b`, respectively. The number of axes for `a` and\n `b` must be equal. If `axes=0`, computes the outer product between `a` and\n `b`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with the same type as `a`.\n\n Raises:\n ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.\n IndexError: If the values in axes exceed the rank of the corresponding\n tensor.\n "}, {"name": "tile", "path": "./tf/tile.md", "desc": "Constructs a tensor by tiling a given tensor.", "type": "Functions", "docs": "Constructs a tensor by tiling a given tensor.\n\n This operation creates a new tensor by replicating `input` `multiples` times.\n The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,\n and the values of `input` are replicated `multiples[i]` times along the 'i'th\n dimension. 
For example, tiling `[a b c d]` by `[2]` produces\n `[a b c d a b c d]`.\n\n >>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32)\n >>> b = tf.constant([1,2], tf.int32)\n >>> tf.tile(a, b)\n <tf.Tensor: shape=(2, 6), dtype=int32, numpy=\n array([[1, 2, 3, 1, 2, 3],\n [4, 5, 6, 4, 5, 6]], dtype=int32)>\n >>> c = tf.constant([2,1], tf.int32)\n >>> tf.tile(a, c)\n <tf.Tensor: shape=(4, 3), dtype=int32, numpy=\n array([[1, 2, 3],\n [4, 5, 6],\n [1, 2, 3],\n [4, 5, 6]], dtype=int32)>\n >>> d = tf.constant([2,2], tf.int32)\n >>> tf.tile(a, d)\n <tf.Tensor: shape=(4, 6), dtype=int32, numpy=\n array([[1, 2, 3, 1, 2, 3],\n [4, 5, 6, 4, 5, 6],\n [1, 2, 3, 1, 2, 3],\n [4, 5, 6, 4, 5, 6]], dtype=int32)>\n\n Args:\n input: A `Tensor`. 1-D or higher.\n multiples: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n 1-D. Length must be the same as the number of dimensions in `input`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n "}, {"name": "timestamp", "path": "./tf/timestamp.md", "desc": "Provides the time since epoch in seconds.", "type": "Functions", "docs": "Provides the time since epoch in seconds.\n\n Returns the timestamp as a `float64` for seconds since the Unix epoch.\n\n Note: the timestamp is computed when the op is executed, not when it is added\n to the graph.\n\n Args:\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float64`.\n "}, {"name": "transpose", "path": "./tf/transpose.md", "desc": "Transposes `a`, where `a` is a Tensor.", "type": "Functions", "docs": "Transposes `a`, where `a` is a Tensor.\n\n Permutes the dimensions according to the value of `perm`.\n\n The returned tensor's dimension `i` will correspond to the input dimension\n `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is the rank\n of the input tensor. Hence by default, this operation performs a regular\n matrix transpose on 2-D input Tensors.\n\n If conjugate is `True` and `a.dtype` is either `complex64` or `complex128`\n then the values of `a` are conjugated and transposed.\n\n @compatibility(numpy)\n In `numpy` transposes are memory-efficient constant time operations as they\n simply return a new view of the same data with adjusted `strides`.\n\n TensorFlow does not support strides, so `transpose` returns a new tensor with\n the items permuted.\n @end_compatibility\n\n For example:\n\n >>> x = tf.constant([[1, 2, 3], [4, 5, 6]])\n >>> tf.transpose(x)\n <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n array([[1, 4],\n [2, 5],\n [3, 6]], dtype=int32)>\n\n Equivalently, you could call `tf.transpose(x, perm=[1, 0])`.\n\n If `x` is complex, setting conjugate=True gives the conjugate transpose:\n\n >>> x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],\n ... [4 + 4j, 5 + 5j, 6 + 6j]])\n >>> tf.transpose(x, conjugate=True)\n <tf.Tensor: shape=(3, 2), dtype=complex128, numpy=\n array([[1.-1.j, 4.-4.j],\n [2.-2.j, 5.-5.j],\n [3.-3.j, 6.-6.j]])>\n\n `perm` is more useful for n-dimensional tensors where n > 2:\n\n >>> x = tf.constant([[[ 1, 2, 3],\n ... [ 4, 5, 6]],\n ... [[ 7, 8, 9],\n ... [10, 11, 12]]])\n\n As above, simply calling `tf.transpose` will default to `perm=[2,1,0]`.\n\n To take the transpose of the matrices in dimension-0 (such as when you are\n transposing matrices where 0 is the batch dimension), you would set\n `perm=[0,2,1]`.\n\n >>> tf.transpose(x, perm=[0, 2, 1])\n <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=\n array([[[ 1,  4],\n [ 2,  5],\n [ 3,  6]],\n [[ 7, 10],\n [ 8, 11],\n [ 9, 12]]], dtype=int32)>\n\n Note: This has a shorthand, `tf.linalg.matrix_transpose`.\n\n Args:\n a: A `Tensor`.\n perm: A permutation of the dimensions of `a`. This should be a vector.\n conjugate: Optional bool. 
Setting it to `True` is mathematically equivalent\n to tf.math.conj(tf.transpose(input)).\n name: A name for the operation (optional).\n\n Returns:\n A transposed `Tensor`.\n "}, {"name": "truediv", "path": "./tf/math/truediv.md", "desc": "Divides x / y elementwise (using Python 3 division operator semantics", "type": "Functions", "docs": "Divides x / y elementwise (using Python 3 division operator semantics).\n\n NOTE: Prefer using the Tensor operator or tf.divide which obey Python\n division operator semantics.\n\n This function forces Python 3 division operator semantics where all integer\n arguments are cast to floating types first. This op is generated by normal\n `x / y` division in Python 3 and in Python 2.7 with\n `from __future__ import division`. If you want integer division that rounds\n down, use `x // y` or `tf.math.floordiv`.\n\n `x` and `y` must have the same numeric type. If the inputs are floating\n point, the output will have the same type. If the inputs are integral, the\n inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`\n and `int64` (matching the behavior of Numpy).\n\n Args:\n x: `Tensor` numerator of numeric type.\n y: `Tensor` denominator of numeric type.\n name: A name for the operation (optional).\n\n Returns:\n `x / y` evaluated in floating point.\n\n Raises:\n TypeError: If `x` and `y` have different dtypes.\n "}, {"name": "truncatediv", "path": "./tf/truncatediv.md", "desc": "Returns x / y element-wise for integer types.", "type": "Functions", "docs": "Returns x / y element-wise for integer types.\n\n Truncation designates that negative numbers will round fractional quantities\n toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different\n than Python semantics. See `FloorDiv` for a division function that matches\n Python Semantics.\n\n *NOTE*: `truncatediv` supports broadcasting. More about broadcasting\n [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\n Args:\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `uint32`, `uint64`, `int64`, `complex64`, `complex128`.\n y: A `Tensor`. Must have the same type as `x`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n "}, {"name": "truncatemod", "path": "./tf/truncatemod.md", "desc": "Returns element-wise remainder of division. This emulates C semantics in that", "type": "Functions", "docs": "Returns element-wise remainder of division. This emulates C semantics in that\n\n the result here is consistent with a truncating divide. E.g. `truncate(x / y) *\n y + truncate_mod(x, y) = x`.\n\n *NOTE*: `truncatemod` supports broadcasting. More about broadcasting\n [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\n Args:\n x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `bfloat16`, `half`, `float32`, `float64`.\n y: A `Tensor`. Must have the same type as `x`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `x`.\n "}, {"name": "tuple", "path": "./tf/tuple.md", "desc": "Groups tensors together.", "type": "Functions", "docs": "Groups tensors together.\n\n The returned tensors have the same value as the input tensors, but they\n are computed only after all the input tensors have been computed.\n\n Note: *In TensorFlow 2 with eager and/or Autograph, you should not require\n this method, as ops execute in the expected order thanks to automatic control\n dependencies.* Only use `tf.tuple` when working with v1 `tf.Graph` code.\n\n See also `tf.group` and `tf.control_dependencies`.\n\n Args:\n tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`.\n control_inputs: List of additional ops to finish before returning.\n name: (optional) A name to use as a `name_scope` for the operation.\n\n Returns:\n Same as `tensors`.\n\n Raises:\n ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`.\n TypeError: If `control_inputs` is not a list of `Operation` or `Tensor`\n objects.\n\n "}, {"name": "type_spec_from_value", "path": "./tf/type_spec_from_value.md", "desc": "Returns a tf.TypeSpec that represents the given `value`.", "type": "Functions", "docs": "Returns a `tf.TypeSpec` that represents the given `value`.\n\n Examples:\n\n >>> tf.type_spec_from_value(tf.constant([1, 2, 3]))\n TensorSpec(shape=(3,), dtype=tf.int32, name=None)\n >>> tf.type_spec_from_value(np.array([4.0, 5.0], np.float64))\n TensorSpec(shape=(2,), dtype=tf.float64, name=None)\n >>> tf.type_spec_from_value(tf.ragged.constant([[1, 2], [3, 4, 5]]))\n RaggedTensorSpec(TensorShape([2, None]), tf.int32, 1, tf.int64)\n\n >>> example_input = tf.ragged.constant([[1, 2], [3]])\n >>> @tf.function(input_signature=[tf.type_spec_from_value(example_input)])\n ... def f(x):\n ... return tf.reduce_sum(x, axis=1)\n\n Args:\n value: A value that can be accepted or returned by TensorFlow APIs. Accepted\n types for `value` include `tf.Tensor`, any value that can be converted to\n `tf.Tensor` using `tf.convert_to_tensor`, and any subclass of\n `CompositeTensor` (such as `tf.RaggedTensor`).\n\n Returns:\n A `TypeSpec` that is compatible with `value`.\n\n Raises:\n TypeError: If a TypeSpec cannot be built for `value`, because its type\n is not supported.\n "}, {"name": "unique", "path": "./tf/unique.md", "desc": "Finds unique elements in a 1-D tensor.", "type": "Functions", "docs": "Finds unique elements in a 1-D tensor.\n\n This operation returns a tensor `y` containing all of the unique elements of `x`\n sorted in the same order that they occur in `x`; `x` does not need to be sorted.\n This operation also returns a tensor `idx` the same size as `x` that contains\n the index of each value of `x` in the unique output `y`. In other words:\n\n `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`\n\n Examples:\n\n ```\n # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]\n y, idx = unique(x)\n y ==> [1, 2, 4, 7, 8]\n idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]\n ```\n\n ```\n # tensor 'x' is [4, 5, 1, 2, 3, 3, 4, 5]\n y, idx = unique(x)\n y ==> [4, 5, 1, 2, 3]\n idx ==> [0, 1, 2, 3, 4, 4, 0, 1]\n ```\n\n Args:\n x: A `Tensor`. 1-D.\n out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (y, idx).\n\n y: A `Tensor`. 
Has the same type as `x`.\n idx: A `Tensor` of type `out_idx`.\n "}, {"name": "unique_with_counts", "path": "./tf/unique_with_counts.md", "desc": "Finds unique elements in a 1-D tensor.", "type": "Functions", "docs": "Finds unique elements in a 1-D tensor.\n\n This operation returns a tensor `y` containing all of the unique elements of `x`\n sorted in the same order that they occur in `x`. This operation also returns a\n tensor `idx` the same size as `x` that contains the index of each value of `x`\n in the unique output `y`. Finally, it returns a third tensor `count` that\n contains the count of each element of `y` in `x`. In other words:\n\n `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`\n\n For example:\n\n ```\n # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]\n y, idx, count = unique_with_counts(x)\n y ==> [1, 2, 4, 7, 8]\n idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]\n count ==> [2, 1, 3, 1, 2]\n ```\n\n Args:\n x: A `Tensor`. 1-D.\n out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (y, idx, count).\n\n y: A `Tensor`. Has the same type as `x`.\n idx: A `Tensor` of type `out_idx`.\n count: A `Tensor` of type `out_idx`.\n "}, {"name": "unravel_index", "path": "./tf/unravel_index.md", "desc": "Converts an array of flat indices into a tuple of coordinate arrays.", "type": "Functions", "docs": "Converts an array of flat indices into a tuple of coordinate arrays.\n\n \n Example:\n\n ```\n y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3])\n # 'dims' represent a hypothetical (3, 3) tensor of indices:\n # [[0, 1, *2*],\n # [3, 4, *5*],\n # [6, *7*, 8]]\n # For each entry from 'indices', this operation returns\n # its coordinates (marked with '*'), such as\n # 2 ==> (0, 2)\n # 5 ==> (1, 2)\n # 7 ==> (2, 1)\n y ==> [[0, 1, 2], [2, 2, 1]]\n ```\n\n @compatibility(numpy)\n Equivalent to np.unravel_index\n @end_compatibility\n\n Args:\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n An 0-D or 1-D `int` Tensor whose elements are indices into the\n flattened version of an array of dimensions dims.\n dims: A `Tensor`. Must have the same type as `indices`.\n An 1-D `int` Tensor. The shape of the array to use for unraveling\n indices.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `indices`.\n "}, {"name": "unstack", "path": "./tf/unstack.md", "desc": "Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1", "type": "Functions", "docs": "Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.\n\n Unpacks tensors from `value` by chipping it along the `axis` dimension.\n\n >>> x = tf.reshape(tf.range(12), (3,4))\n >>>\n >>> p, q, r = tf.unstack(x)\n >>> p.shape.as_list()\n [4]\n\n >>> i, j, k, l = tf.unstack(x, axis=1)\n >>> i.shape.as_list()\n [3]\n\n This is the opposite of stack.\n\n >>> x = tf.stack([i, j, k, l], axis=1)\n\n More generally if you have a tensor of shape `(A, B, C, D)`:\n\n >>> A, B, C, D = [2, 3, 4, 5]\n >>> t = tf.random.normal(shape=[A, B, C, D])\n\n The number of tensors returned is equal to the length of the target `axis`:\n\n >>> axis = 2\n >>> items = tf.unstack(t, axis=axis)\n >>> len(items) == t.shape[axis]\n True\n\n The shape of each result tensor is equal to the shape of the input tensor,\n with the target `axis` removed.\n\n >>> items[0].shape.as_list() # [A, B, D]\n [2, 3, 5]\n\n The value of each tensor `items[i]` is equal to the slice of `input` across\n `axis` at index `i`:\n\n >>> for i in range(len(items)):\n ... slice = t[:,:,i,:]\n ... assert tf.reduce_all(slice == items[i])\n\n #### Python iterable unpacking\n\n With eager execution you _can_ unstack the 0th axis of a tensor using python's\n iterable unpacking:\n\n >>> t = tf.constant([1,2,3])\n >>> a,b,c = t\n\n `unstack` is still necessary because Iterable unpacking doesn't work in\n a `@tf.function`: Symbolic tensors are not iterable.\n\n You need to use `tf.unstack` here:\n\n >>> @tf.function\n ... def bad(t):\n ... a,b,c = t\n ... return a\n >>>\n >>> bad(t)\n Traceback (most recent call last):\n ...\n OperatorNotAllowedInGraphError: ...\n\n >>> @tf.function\n ... def good(t):\n ... a,b,c = tf.unstack(t)\n ... return a\n >>>\n >>> good(t).numpy()\n 1\n\n #### Unknown shapes\n\n Eager tensors have concrete values, so their shape is always known.\n Inside a `tf.function` the symbolic tensors may have unknown shapes.\n If the length of `axis` is unknown `tf.unstack` will fail because it cannot\n handle an unknown number of tensors:\n\n >>> @tf.function(input_signature=[tf.TensorSpec([None], tf.float32)])\n ... def bad(t):\n ... tensors = tf.unstack(t)\n ... return tensors[0]\n >>>\n >>> bad(tf.constant([1,2,3]))\n Traceback (most recent call last):\n ...\n ValueError: Cannot infer argument `num` from shape (None,)\n\n If you know the `axis` length you can pass it as the `num` argument. But this\n must be a constant value.\n\n If you actually need a variable number of tensors in a single `tf.function`\n trace, you will need to use explicit loops and a `tf.TensorArray` instead.\n\n 
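For example, reading the first element under a dynamic length might look like this (a minimal sketch of the `tf.TensorArray` workaround):\n\n >>> @tf.function(input_signature=[tf.TensorSpec([None], tf.float32)])\n ... def first(t):\n ... ta = tf.TensorArray(tf.float32, size=tf.shape(t)[0]).unstack(t)\n ... return ta.read(0)\n >>>\n >>> first(tf.constant([1., 2., 3.])).numpy()\n 1.0\n\n 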
Args:\n value: A rank `R > 0` `Tensor` to be unstacked.\n num: An `int`. The length of the dimension `axis`. Automatically inferred if\n `None` (the default).\n axis: An `int`. The axis to unstack along. Defaults to the first dimension.\n Negative values wrap around, so the valid range is `[-R, R)`.\n name: A name for the operation (optional).\n\n Returns:\n The list of `Tensor` objects unstacked from `value`.\n\n Raises:\n ValueError: If `axis` is out of the range `[-R, R)`.\n ValueError: If `num` is unspecified and cannot be inferred.\n InvalidArgumentError: If `num` does not match the shape of `value`.\n "}, {"name": "variable_creator_scope", "path": "./tf/variable_creator_scope.md", "desc": "Scope which defines a variable creation function to be used by variable(", "type": "Functions", "docs": "Scope which defines a variable creation function to be used by variable().\n\n `variable_creator` is expected to be a function with the following signature:\n\n ```\n def variable_creator(next_creator, **kwargs)\n ```\n\n A creator that wants to create a variable should eventually call\n `next_creator` rather than calling `Variable` or `ResourceVariable` directly.\n This helps make creators composable. A creator may\n choose to create multiple variables, return already existing variables, or\n simply register that a variable was created and defer to the next creators in\n line. Creators can also modify the keyword arguments seen by the next\n creators.\n\n Custom getters in the variable scope will eventually resolve down to these\n custom creators when they do create variables.\n\n The valid keyword arguments in `kwargs` are:\n\n * initial_value: A `Tensor`, or Python object convertible to a `Tensor`,\n which is the initial value for the Variable. The initial value must have\n a shape specified unless `validate_shape` is set to False. Can also be a\n callable with no argument that returns the initial value when called. In\n that case, `dtype` must be specified. (Note that initializer functions\n from init_ops.py must first be bound to a shape before being used here.)\n * trainable: If `True`, the default, GradientTapes automatically watch\n uses of this Variable.\n * validate_shape: If `False`, allows the variable to be initialized with a\n value of unknown shape. If `True`, the default, the shape of\n `initial_value` must be known.\n * caching_device: Optional device string describing where the Variable\n should be cached for reading. Defaults to the Variable's device.\n If not `None`, caches on another device. Typical use is to cache\n on the device where the Ops using the Variable reside, to deduplicate\n copying through `Switch` and other conditional statements.\n * name: Optional name for the variable. Defaults to `'Variable'` and gets\n uniquified automatically.\n * dtype: If set, initial_value will be converted to the given type.\n If `None`, either the datatype will be kept (if `initial_value` is\n a Tensor), or `convert_to_tensor` will decide.\n * constraint: A constraint function to be applied to the variable after\n updates by some algorithms.\n * synchronization: Indicates when a distributed variable will be\n aggregated. Accepted values are constants defined in the class\n `tf.VariableSynchronization`. By default the synchronization is set to\n `AUTO` and the current `DistributionStrategy` chooses\n when to synchronize.\n * aggregation: Indicates how a distributed variable will be aggregated.\n Accepted values are constants defined in the class\n `tf.VariableAggregation`.\n\n 
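For example, a creator that simply records each variable created and then defers to the next creator in line might look like this (a minimal sketch; the names are illustrative):\n\n ```python\n created = []\n\n def tracking_creator(next_creator, **kwargs):\n # Record the creation, then defer to the next creator.\n v = next_creator(**kwargs)\n created.append(v.name)\n return v\n\n with tf.variable_creator_scope(tracking_creator):\n v = tf.Variable(1.0, name=\"x\")\n\n # created == [\"x:0\"]\n ```\n\n 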
This set may grow over time, so it's important that the signature of creators\n stays as described above.\n\n Args:\n variable_creator: the creator function to install.\n\n Yields:\n A scope in which the creator is active.\n "}, {"name": "vectorized_map", "path": "./tf/vectorized_map.md", "desc": "Parallel map on the list of tensors unpacked from `elems` on dimension 0.", "type": "Functions", "docs": "Parallel map on the list of tensors unpacked from `elems` on dimension 0.\n\n This method works similarly to `tf.map_fn` but is optimized to run much faster,\n possibly with a much larger memory footprint. The speedups are obtained by\n vectorization (see [Auto-Vectorizing TensorFlow Graphs: Jacobians,\n Auto-Batching and Beyond](https://arxiv.org/pdf/1903.04243.pdf)). The idea\n behind vectorization is to semantically launch all the invocations of `fn` in\n parallel and fuse corresponding operations across all these invocations. This\n fusion is done statically at graph generation time and the generated code is\n often similar in performance to a manually fused version.\n\n Because `tf.vectorized_map` fully parallelizes the batch, this method will\n generally be significantly faster than using `tf.map_fn`, especially in eager\n mode. However, this is an experimental feature and currently has a number of\n limitations:\n - There should be no data dependency between the different semantic\n invocations of `fn`, i.e. it should be safe to map the elements of the\n inputs in any order.\n - Stateful kernels are mostly unsupported, since these often imply a\n data dependency. A limited set of such stateful kernels is supported\n (such as random ops and `Variable` operations like reads).\n - `fn` has limited support for control flow operations.\n - `fn` should return nested structure of Tensors or Operations. However\n if an Operation is returned, it should have zero outputs.\n - The shape and dtype of any intermediate or output tensors in the\n computation of `fn` should not depend on the input to `fn`.\n\n Examples:\n ```python\n def outer_product(a):\n return tf.tensordot(a, a, 0)\n\n batch_size = 100\n a = tf.ones((batch_size, 32, 32))\n c = tf.vectorized_map(outer_product, a)\n assert c.shape == (batch_size, 32, 32, 32, 32)\n ```\n\n ```python\n # Computing per-example gradients\n\n batch_size = 10\n num_features = 32\n layer = tf.keras.layers.Dense(1)\n\n def model_fn(arg):\n with tf.GradientTape() as g:\n inp, label = arg\n inp = tf.expand_dims(inp, 0)\n label = tf.expand_dims(label, 0)\n prediction = layer(inp)\n loss = tf.nn.l2_loss(label - prediction)\n return g.gradient(loss, (layer.kernel, layer.bias))\n\n inputs = tf.random.uniform([batch_size, num_features])\n labels = tf.random.uniform([batch_size, 1])\n per_example_gradients = tf.vectorized_map(model_fn, (inputs, labels))\n assert per_example_gradients[0].shape == (batch_size, num_features, 1)\n assert per_example_gradients[1].shape == (batch_size, 1)\n ```\n\n Args:\n fn: The callable to be performed. 
It accepts one argument, which will have\n the same (possibly nested) structure as `elems`, and returns a possibly\n nested structure of Tensors and Operations, which may be different than\n the structure of `elems`.\n elems: A tensor or (possibly nested) sequence of tensors, each of which will\n be unpacked along their first dimension. The nested sequence of the\n resulting slices will be mapped over by `fn`. The first dimensions of all\n elements must broadcast to a consistent value; equivalently, each\n element tensor must have first dimension of either `B` or `1`, for some\n common batch size `B >= 1`.\n fallback_to_while_loop: If true, on failing to vectorize an operation,\n the unsupported op is wrapped in a tf.while_loop to execute the map\n iterations. Note that this fallback only happens for unsupported ops and\n other parts of `fn` are still vectorized. If false, on encountering an\n unsupported op, a ValueError is thrown. Note that the fallbacks can result\n in slowdowns since vectorization often yields speedup of one to two orders\n of magnitude.\n\n Returns:\n A tensor or (possibly nested) sequence of tensors. Each tensor packs the\n results of applying fn to tensors unpacked from elems along the first\n dimension, from first to last.\n\n Although they are less common as user-visible inputs and outputs, note that\n tensors of type `tf.variant` which represent tensor lists (for example from\n `tf.raw_ops.TensorListFromTensor`) are vectorized by stacking the list\n contents rather than the variant itself, and so the container tensor will\n have a scalar shape when returned rather than the usual stacked shape. This\n improves the performance of control flow gradient vectorization.\n\n Raises:\n ValueError: If vectorization fails and fallback_to_while_loop is False.\n "}, {"name": "where", "path": "./tf/where.md", "desc": "Returns the indices of non-zero elements, or multiplexes `x` and `y`.", "type": "Functions", "docs": "Returns the indices of non-zero elements, or multiplexes `x` and `y`.\n\n This operation has two modes:\n\n 1. **Return the indices of non-zero elements** - When only\n `condition` is provided the result is an `int64` tensor where each row is\n the index of a non-zero element of `condition`. The result's shape\n is `[tf.math.count_nonzero(condition), tf.rank(condition)]`.\n 2. **Multiplex `x` and `y`** - When both `x` and `y` are provided the\n result has the shape of `x`, `y`, and `condition` broadcast together. The\n result is taken from `x` where `condition` is non-zero\n or `y` where `condition` is zero.\n\n #### 1. Return the indices of non-zero elements\n\n Note: In this mode `condition` can have a dtype of `bool` or any numeric\n dtype.\n\n If `x` and `y` are not provided (both are None):\n\n `tf.where` will return the indices of `condition` that are non-zero,\n in the form of a 2-D tensor with shape `[n, d]`, where `n` is the number of\n non-zero elements in `condition` (`tf.count_nonzero(condition)`), and `d` is\n the number of axes of `condition` (`tf.rank(condition)`).\n\n Indices are output in row-major order. The `condition` can have a `dtype` of\n `tf.bool`, or any numeric `dtype`.\n\n Here `condition` is a 1-axis `bool` tensor with 2 `True` values. The result\n has a shape of `[2,1]`\n\n >>> tf.where([True, False, False, True]).numpy()\n array([[0],\n [3]])\n\n Here `condition` is a 2-axis integer tensor, with 3 non-zero values. 
The\n result has a shape of `[3, 2]`.\n\n >>> tf.where([[1, 0, 0], [1, 0, 1]]).numpy()\n array([[0, 0],\n [1, 0],\n [1, 2]])\n\n Here `condition` is a 3-axis float tensor, with 5 non-zero values. The output\n shape is `[5, 3]`.\n\n >>> float_tensor = [[[0.1, 0], [0, 2.2], [3.5, 1e6]],\n ... [[0, 0], [0, 0], [99, 0]]]\n >>> tf.where(float_tensor).numpy()\n array([[0, 0, 0],\n [0, 1, 1],\n [0, 2, 0],\n [0, 2, 1],\n [1, 2, 0]])\n\n These are the same indices that `tf.sparse.SparseTensor` would use to\n represent the condition tensor:\n\n >>> sparse = tf.sparse.from_dense(float_tensor)\n >>> sparse.indices.numpy()\n array([[0, 0, 0],\n [0, 1, 1],\n [0, 2, 0],\n [0, 2, 1],\n [1, 2, 0]])\n\n A complex number is considered non-zero if either the real or imaginary\n component is non-zero:\n\n >>> tf.where([complex(0.), complex(1.), 0+1j, 1+1j]).numpy()\n array([[1],\n [2],\n [3]])\n\n #### 2. Multiplex `x` and `y`\n\n Note: In this mode `condition` must have a dtype of `bool`.\n\n If `x` and `y` are also provided (both have non-None values) the `condition`\n tensor acts as a mask that chooses whether the corresponding\n element / row in the output should be taken from `x` (if the element in\n `condition` is `True`) or `y` (if it is `False`).\n\n The shape of the result is formed by\n [broadcasting](https://docs.scipy.org/doc/numpy/reference/ufuncs.html)\n together the shapes of `condition`, `x`, and `y`.\n\n When all three inputs have the same size, each is handled element-wise.\n\n >>> tf.where([True, False, False, True],\n ... [1, 2, 3, 4],\n ... [100, 200, 300, 400]).numpy()\n array([ 1, 200, 300, 4], dtype=int32)\n\n There are two main rules for broadcasting:\n\n 1. If a tensor has fewer axes than the others, length-1 axes are added to the\n left of the shape.\n 2. Axes with length-1 are stretched to match the corresponding axes of the other\n tensors.\n\n A length-1 vector is stretched to match the other vectors:\n\n >>> tf.where([True, False, False, True], [1, 2, 3, 4], [100]).numpy()\n array([ 1, 100, 100, 4], dtype=int32)\n\n A scalar is expanded to match the other arguments:\n\n >>> tf.where([[True, False], [False, True]], [[1, 2], [3, 4]], 100).numpy()\n array([[ 1, 100], [100, 4]], dtype=int32)\n >>> tf.where([[True, False], [False, True]], 1, 100).numpy()\n array([[ 1, 100], [100, 1]], dtype=int32)\n\n A scalar `condition` returns the complete `x` or `y` tensor, with\n broadcasting applied.\n\n >>> tf.where(True, [1, 2, 3, 4], 100).numpy()\n array([1, 2, 3, 4], dtype=int32)\n >>> tf.where(False, [1, 2, 3, 4], 100).numpy()\n array([100, 100, 100, 100], dtype=int32)\n\n For a non-trivial example of broadcasting, here `condition` has a shape of\n `[3]`, `x` has a shape of `[3,3]`, and `y` has a shape of `[3,1]`.\n Broadcasting first expands the shape of `condition` to `[1,3]`. The final\n broadcast shape is `[3,3]`. `condition` will select columns from `x` and `y`.\n Since `y` only has one column, all columns from `y` will be identical.\n\n >>> tf.where([True, False, True],\n ... x=[[1, 2, 3],\n ... [4, 5, 6],\n ... [7, 8, 9]],\n ... y=[[100],\n ... [200],\n ... [300]]\n ... ).numpy()\n array([[ 1, 100, 3],\n [ 4, 200, 6],\n [ 7, 300, 9]], dtype=int32)\n\n Note that if the gradient of either branch of the `tf.where` generates\n a `NaN`, then the gradient of the entire `tf.where` will be `NaN`. 
This is\n because the gradient calculation for `tf.where` combines the two branches, for\n performance reasons.\n\n A workaround is to use an inner `tf.where` to ensure the function has\n no asymptote, and to avoid computing a value whose gradient is `NaN` by\n replacing dangerous inputs with safe inputs.\n\n Instead of this,\n\n >>> x = tf.constant(0., dtype=tf.float32)\n >>> with tf.GradientTape() as tape:\n ... tape.watch(x)\n ... y = tf.where(x < 1., 0., 1. / x)\n >>> print(tape.gradient(y, x))\n tf.Tensor(nan, shape=(), dtype=float32)\n\n Although the `1. / x` values are never used, their gradient is `NaN` when\n `x = 0`. Instead, we should guard that with another `tf.where`:\n\n >>> x = tf.constant(0., dtype=tf.float32)\n >>> with tf.GradientTape() as tape:\n ... tape.watch(x)\n ... safe_x = tf.where(tf.equal(x, 0.), 1., x)\n ... y = tf.where(x < 1., 0., 1. / safe_x)\n >>> print(tape.gradient(y, x))\n tf.Tensor(0.0, shape=(), dtype=float32)\n\n See also:\n\n * `tf.sparse` - The indices returned by the first form of `tf.where` can be\n useful in `tf.sparse.SparseTensor` objects.\n * `tf.gather_nd`, `tf.scatter_nd`, and related ops - Given the\n list of indices returned from `tf.where`, the `scatter` and `gather` family\n of ops can be used to fetch values or insert values at those indices.\n * `tf.strings.length` - `tf.string` is not an allowed dtype for the\n `condition`. Use the string length instead.\n\n Args:\n condition: A `tf.Tensor` of dtype bool, or any numeric dtype. `condition`\n must have dtype `bool` when `x` and `y` are provided.\n x: If provided, a Tensor which is of the same type as `y`, and has a shape\n broadcastable with `condition` and `y`.\n y: If provided, a Tensor which is of the same type as `x`, and has a shape\n broadcastable with `condition` and `x`.\n name: A name for the operation (optional).\n\n Returns:\n If `x` and `y` are provided:\n A `Tensor` with the same type as `x` and `y`, and shape that\n is broadcast from `condition`, `x`, and `y`.\n Otherwise, a `Tensor` with shape `[tf.math.count_nonzero(condition),\n tf.rank(condition)]`.\n\n Raises:\n ValueError: When exactly one of `x` or `y` is non-None, or the shapes\n are not all broadcastable.\n "}, {"name": "while_loop", "path": "./tf/while_loop.md", "desc": "Repeat `body` while the condition `cond` is true. (deprecated argument values)", "type": "Functions", "docs": "Repeat `body` while the condition `cond` is true. (deprecated argument values)\n\nDeprecated: SOME ARGUMENT VALUES ARE DEPRECATED: `(back_prop=False)`. They will be removed in a future version.\nInstructions for updating:\nback_prop=False is deprecated. Consider using tf.stop_gradient instead.\nInstead of:\nresults = tf.while_loop(c, b, vars, back_prop=False)\nUse:\nresults = tf.nest.map_structure(tf.stop_gradient, tf.while_loop(c, b, vars))\n\n`cond` is a callable returning a boolean scalar tensor. `body` is a callable\nreturning a (possibly nested) tuple, namedtuple or list of tensors of the same\narity (length and structure) and types as `loop_vars`. `loop_vars` is a\n(possibly nested) tuple, namedtuple or list of tensors that is passed to both\n`cond` and `body`. `cond` and `body` both take as many arguments as there are\n`loop_vars`.\n\nIn addition to regular Tensors or IndexedSlices, the body may accept and\nreturn TensorArray objects. 
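For example, the body can accumulate per-iteration results in a TensorArray (a minimal sketch, assuming TF 2.x eager execution; the loop bound and values are illustrative):\n\n```python\nta = tf.TensorArray(tf.float32, size=0, dynamic_size=True)\n\ndef body(i, ta):\n  # Each write returns a new TensorArray handle, which must be carried forward.\n  return i + 1, ta.write(i, tf.cast(i * i, tf.float32))\n\n_, ta = tf.while_loop(lambda i, ta: i < 5, body, [tf.constant(0), ta])\nprint(ta.stack())  # tf.Tensor([ 0.  1.  4.  9. 16.], shape=(5,), dtype=float32)\n```\n\n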
The flows of the TensorArray objects will\nbe appropriately forwarded between loops and during gradient calculations.\n\nNote that `while_loop` calls `cond` and `body` *exactly once* (inside the\ncall to `while_loop`, and not at all during `Session.run()`). `while_loop`\nstitches together the graph fragments created during the `cond` and `body`\ncalls with some additional graph nodes to create the graph flow that\nrepeats `body` until `cond` returns false.\n\nFor correctness, `tf.while_loop()` strictly enforces shape invariants for\nthe loop variables. A shape invariant is a (possibly partial) shape that\nis unchanged across the iterations of the loop. An error will be raised\nif the shape of a loop variable after an iteration is determined to be more\ngeneral than or incompatible with its shape invariant. For example, a shape\nof [11, None] is more general than a shape of [11, 17], and [11, 21] is not\ncompatible with [11, 17]. By default (if the argument `shape_invariants` is\nnot specified), it is assumed that the initial shape of each tensor in\n`loop_vars` is the same in every iteration. The `shape_invariants` argument\nallows the caller to specify a less specific shape invariant for each loop\nvariable, which is needed if the shape varies between iterations. The\n`tf.Tensor.set_shape`\nfunction may also be used in the `body` function to indicate that\nthe output loop variable has a particular shape. The shape invariants for\nSparseTensor and IndexedSlices are treated specially as follows:\n\na) If a loop variable is a SparseTensor, the shape invariant must be\nTensorShape([r]) where r is the rank of the dense tensor represented\nby the sparse tensor. This means the shapes of the three tensors of the\nSparseTensor are ([None], [None, r], [r]). NOTE: The shape invariant here\nis the shape of the SparseTensor.dense_shape property. It must be the shape of\na vector.\n\nb) If a loop variable is an IndexedSlices, the shape invariant must be\na shape invariant of the values tensor of the IndexedSlices. This means\nthe shapes of the three tensors of the IndexedSlices are (shape, [shape[0]],\n[shape.ndims]).\n\n`while_loop` implements non-strict semantics, enabling multiple iterations\nto run in parallel. The maximum number of parallel iterations can be\ncontrolled by `parallel_iterations`, which gives users some control over\nmemory consumption and execution order. For correct programs, `while_loop`\nshould return the same result for any parallel_iterations > 0.\n\nFor training, TensorFlow stores the tensors that are produced in the\nforward inference and are needed in back propagation. These tensors are a\nmain source of memory consumption and often cause OOM errors when training\non GPUs. When the flag swap_memory is true, we swap out these tensors from\nGPU to CPU. This, for example, allows us to train RNN models with very long\nsequences and large batches.\n\nArgs:\n cond: A callable that represents the termination condition of the loop.\n body: A callable that represents the loop body.\n loop_vars: A (possibly nested) tuple, namedtuple or list of numpy array,\n `Tensor`, and `TensorArray` objects.\n shape_invariants: The shape invariants for the loop variables.\n parallel_iterations: The number of iterations allowed to run in parallel. It\n must be a positive integer.\n back_prop: (optional) Deprecated. False disables support for back\n propagation. 
Prefer using `tf.stop_gradient` instead.\n swap_memory: Whether GPU-CPU memory swap is enabled for this loop.\n maximum_iterations: Optional maximum number of iterations of the while loop\n to run. If provided, the `cond` output is AND-ed with an additional\n condition ensuring the number of iterations executed is no greater than\n `maximum_iterations`.\n name: Optional name prefix for the returned tensors.\n\nReturns:\n The output tensors for the loop variables after the loop. The return value\n has the same structure as `loop_vars`.\n\nRaises:\n TypeError: if `cond` or `body` is not callable.\n ValueError: if `loop_vars` is empty.\n\nExample:\n\n```python\ni = tf.constant(0)\nc = lambda i: tf.less(i, 10)\nb = lambda i: (tf.add(i, 1), )\nr = tf.while_loop(c, b, [i])\n```\n\nExample with nesting and a namedtuple:\n\n```python\nimport collections\nPair = collections.namedtuple('Pair', 'j, k')\nijk_0 = (tf.constant(0), Pair(tf.constant(1), tf.constant(2)))\nc = lambda i, p: i < 10\nb = lambda i, p: (i + 1, Pair((p.j + p.k), (p.j - p.k)))\nijk_final = tf.while_loop(c, b, ijk_0)\n```\n\nExample using shape_invariants:\n\n```python\ni0 = tf.constant(0)\nm0 = tf.ones([2, 2])\nc = lambda i, m: i < 10\nb = lambda i, m: [i+1, tf.concat([m, m], axis=0)]\ntf.while_loop(\n c, b, loop_vars=[i0, m0],\n shape_invariants=[i0.get_shape(), tf.TensorShape([None, 2])])\n```\n\nExample that demonstrates non-strict semantics: In the following\nexample, the final value of the counter `i` does not depend on `x`. So\nthe `while_loop` can increment the counter in parallel with updates of `x`.\nHowever, because the loop counter at one loop iteration depends\non the value at the previous iteration, the loop counter itself cannot\nbe incremented in parallel. Hence if we just want the final value of the\ncounter (which we print on the line `print(sess.run(i))`), then\n`x` will never be incremented, but the counter will be updated on a\nsingle thread. Conversely, if we want the value of the output (which we\nprint on the line `print(sess.run(out).shape)`), then the counter may be\nincremented on its own thread, while `x` can be incremented in\nparallel on a separate thread. In the extreme case, it is conceivable\nthat the thread incrementing the counter runs until completion before\n`x` is incremented even a single time. The one thing that can never\nhappen is for the thread updating `x` to get ahead of the\ncounter thread, because the thread incrementing `x` depends on the value\nof the counter.\n\n```python\nimport tensorflow as tf\n\nn = 10000\nx = tf.constant(list(range(n)))\nc = lambda i, x: i < n\nb = lambda i, x: (tf.compat.v1.Print(i + 1, [i]), tf.compat.v1.Print(x + 1,\n[i], \"x:\"))\ni, out = tf.while_loop(c, b, (0, x))\nwith tf.compat.v1.Session() as sess:\n print(sess.run(i)) # prints [0] ... [9999]\n\n # The following line may increment the counter and x in parallel.\n # The counter thread may get ahead of the other thread, but not the\n # other way around. So you may see things like\n # [9996] x:[9987]\n # meaning that the counter thread is on iteration 9996,\n # while the other thread is on iteration 9987\n print(sess.run(out).shape)\n```
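\n\nExample using `maximum_iterations` (a minimal sketch; the bound of 3 is illustrative). `cond` alone would allow ten iterations, but the AND-ed bound stops the loop after three:\n\n```python\ni = tf.constant(0)\nc = lambda i: i < 10\nb = lambda i: i + 1\nr = tf.while_loop(c, b, [i], maximum_iterations=3)\n# The final counter value is 3, not 10.\n```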
"}, {"name": "zeros", "path": "./tf/zeros.md", "desc": "Creates a tensor with all elements set to zero.", "type": "Functions", "docs": "Creates a tensor with all elements set to zero.\n\n See also `tf.zeros_like`, `tf.ones`, `tf.fill`, `tf.eye`.\n\n This operation returns a tensor of type `dtype` with shape `shape` and\n all elements set to zero.\n\n >>> tf.zeros([3, 4], tf.int32)\n <tf.Tensor: shape=(3, 4), dtype=int32, numpy=\n array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]], dtype=int32)>\n\n Args:\n shape: A `list` of integers, a `tuple` of integers, or\n a 1-D `Tensor` of type `int32`.\n dtype: The DType of an element in the resulting `Tensor`.\n name: Optional string. A name for the operation.\n\n Returns:\n A `Tensor` with all elements set to zero.\n "}, {"name": "zeros_like", "path": "./tf/zeros_like.md", "desc": "Creates a tensor with all elements set to zero.", "type": "Functions", "docs": "Creates a tensor with all elements set to zero.\n\n See also `tf.zeros`.\n\n Given a single tensor or array-like object (`input`), this operation returns\n a tensor of the same type and shape as `input` with all elements set to zero.\n Optionally, you can use `dtype` to specify a new type for the returned tensor.\n\n Examples:\n\n >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])\n >>> tf.zeros_like(tensor)\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[0, 0, 0],\n [0, 0, 0]], dtype=int32)>\n\n >>> tf.zeros_like(tensor, dtype=tf.float32)\n <tf.Tensor: shape=(2, 3), dtype=float32, numpy=\n array([[0., 0., 0.],\n [0., 0., 0.]], dtype=float32)>\n\n >>> tf.zeros_like([[1, 2, 3], [4, 5, 6]])\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[0, 0, 0],\n [0, 0, 0]], dtype=int32)>\n\n Args:\n input: A `Tensor` or array-like object.\n dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,\n `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,\n `complex64`, `complex128`, `bool` or `string` (optional).\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to zero.\n "}] \ No newline at end of file diff --git a/src/views/Home.vue b/src/views/Home.vue index 3c16f13..54436aa 100644 --- a/src/views/Home.vue +++ b/src/views/Home.vue @@ -1,20 +1,17 @@ @@ -24,11 +21,27 @@ import meta from '../meta.json'; import { marked } from 'marked'; import {$} from '../main'; -const apis = ['AggregationMethod.md', 'all_symbols.md', 'api_report.pb', 'argsort.md', 'audio.md', 'autodiff.md', 'autograph.md', 'batch_to_space.md', 'bitcast.md', 'bitwise.md', 'boolean_mask.md', 'broadcast_dynamic_shape.md', 'broadcast_static_shape.md', 'broadcast_to.md', 'case.md', 'cast.md', 'clip_by_global_norm.md', 'clip_by_norm.md', 'clip_by_value.md', 'compat.md', 'concat.md', 'cond.md', 'config.md', 'constant.md', 'constant_initializer.md', 'control_dependencies.md', 'convert_to_tensor.md', 'CriticalSection.md', 'custom_gradient.md', 'data.md', 'debugging.md', 'device.md', 'DeviceSpec.md', 'distribute.md', 'dtypes.md', 'dynamic_partition.md', 'dynamic_stitch.md', 'edit_distance.md', 'einsum.md', 'ensure_shape.md', 'errors.md', 'estimator.md', 'executing_eagerly.md', 'expand_dims.md', 'experimental.md', 'extract_volume_patches.md', 'eye.md', 'feature_column.md', 'fill.md', 'fingerprint.md', 'foldl.md', 'foldr.md', 'function.md', 'gather.md', 'gather_nd.md', 'get_current_name_scope.md', 'get_logger.md', 'get_static_value.md', 'gradients.md', 'GradientTape.md', 'grad_pass_through.md', 'Graph.md', 'graph_util.md', 'group.md', 'guarantee_const.md', 'hessians.md', 'histogram_fixed_width.md', 'histogram_fixed_width_bins.md', 'identity.md', 'identity_n.md', 'image.md', 'IndexedSlices.md', 'IndexedSlicesSpec.md', 'init_scope.md', 'inside_function.md', 'io.md', 
'is_tensor.md', 'keras.md', 'linalg.md', 'linspace.md', 'lite.md', 'load_library.md', 'load_op_library.md', 'lookup.md', 'make_ndarray.md', 'make_tensor_proto.md', 'map_fn.md', 'math.md', 'meshgrid.md', 'mlir.md', 'Module.md', 'name_scope.md', 'nest.md', 'nn.md', 'nondifferentiable_batch_function.md', 'norm.md', 'no_gradient.md', 'no_op.md', 'numpy_function.md', 'ones.md', 'ones_initializer.md', 'ones_like.md', 'one_hot.md', 'Operation.md', 'OptionalSpec.md', 'pad.md', 'parallel_stack.md', 'print.md', 'profiler.md', 'py_function.md', 'quantization.md', 'queue.md', 'ragged.md', 'RaggedTensor.md', 'RaggedTensorSpec.md', 'random.md', 'random_index_shuffle.md', 'random_normal_initializer.md', 'random_uniform_initializer.md', 'range.md', 'rank.md', 'raw_ops.md', 'realdiv.md', 'recompute_grad.md', 'RegisterGradient.md', 'register_tensor_conversion_function.md', 'repeat.md', 'required_space_to_batch_paddings.md', 'reshape.md', 'reverse.md', 'reverse_sequence.md', 'roll.md', 'saved_model.md', 'scan.md', 'scatter_nd.md', 'searchsorted.md', 'sequence_mask.md', 'sets.md', 'shape.md', 'shape_n.md', 'signal.md', 'size.md', 'slice.md', 'sort.md', 'space_to_batch.md', 'space_to_batch_nd.md', 'sparse.md', 'SparseTensorSpec.md', 'split.md', 'squeeze.md', 'stack.md', 'stop_gradient.md', 'strided_slice.md', 'strings.md', 'summary.md', 'switch_case.md', 'sysconfig.md', 'Tensor.md', 'TensorArray.md', 'TensorArraySpec.md', 'tensordot.md', 'TensorShape.md', 'TensorSpec.md', 'tensor_scatter_nd_add.md', 'tensor_scatter_nd_max.md', 'tensor_scatter_nd_min.md', 'tensor_scatter_nd_sub.md', 'tensor_scatter_nd_update.md', 'test.md', 'tile.md', 'timestamp.md', 'tpu.md', 'train.md', 'transpose.md', 'truncatediv.md', 'truncatemod.md', 'tuple.md', 'types.md', 'TypeSpec.md', 'type_spec_from_value.md', 'UnconnectedGradients.md', 'unique.md', 'unique_with_counts.md', 'unravel_index.md', 'unstack.md', 'Variable.md', 'VariableAggregation.md', 'VariableSynchronization.md', 'variable_creator_scope.md', 'vectorized_map.md', 'version.md', 'where.md', 'while_loop.md', 'xla.md', 'zeros.md', 'zeros_initializer.md', 'zeros_like.md', '_api_cache.json', '_redirects.yaml', '_toc.yaml'] +interface APIEntry +{ + name: string + path: string + desc: string + type: string + docs: string +} + +function onlyUnique(value, index, self) { + return self.indexOf(value) === index; +} @Options({components: {}}) export default class Home extends Vue { + get types(): string[] + { + return meta.map(it => it.type).filter(onlyUnique) + } + mounted() { $('.api').accordion({collapsible: true, header: '.header', heightStyle: 'content', @@ -42,8 +55,9 @@ export default class Home extends Vue return marked(markdown) } - get apis() { - return meta.filter(it => it[2]) + apis(ty: string): APIEntry[] + { + return meta.filter(it => it.type == ty) } } @@ -52,6 +66,10 @@ export default class Home extends Vue #Home > * + * margin-top: 10px +.category > * + * + margin-top: 10px + + .api .header text-align: left