Outline
- tf.norm
- tf.reduce_min/max/mean
- tf.argmax/argmin
- tf.equal
- tf.unique
Vector norm
- Euclidean norm
\[ \|x\|_2 = \left(\sum_{k} x_k^2\right)^{\frac{1}{2}} \]
- Max norm
\[ \|x\|_{\infty} = \max_k |x_k| \]
- L1 norm
\[ \|x\|_1 = \sum_{k} |x_k| \]
- This part only deals with vector norms; a short worked example follows below.
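For a concrete feel of the three definitions, take x = (3, -4):
\[ \|x\|_2 = \sqrt{3^2 + (-4)^2} = 5, \quad \|x\|_{\infty} = \max(|3|, |-4|) = 4, \quad \|x\|_1 = |3| + |-4| = 7 \]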
Euclidean Norm
import tensorflow as tf
a = tf.ones([2, 2])
a
<tf.Tensor: id=11, shape=(2, 2), dtype=float32, numpy=
array([[1., 1.],
[1., 1.]], dtype=float32)>
tf.norm(a)
<tf.Tensor: id=7, shape=(), dtype=float32, numpy=2.0>
tf.sqrt(tf.reduce_sum(tf.square(a)))
<tf.Tensor: id=16, shape=(), dtype=float32, numpy=2.0>
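Both give the same value: with no arguments, tf.norm flattens the tensor and computes the Euclidean norm over all entries, here sqrt(1² · 4) = 2.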
a = tf.ones([4, 28, 28, 3])
a.shape
TensorShape([4, 28, 28, 3])
tf.norm(a)
<tf.Tensor: id=25, shape=(), dtype=float32, numpy=96.99484>
tf.sqrt(tf.reduce_sum(tf.square(a)))
<tf.Tensor: id=30, shape=(), dtype=float32, numpy=96.99484>
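Again the two agree: sqrt(4 · 28 · 28 · 3) = sqrt(9408) ≈ 96.995.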
L1 Norm
b = tf.ones([2, 2])
tf.norm(b)
<tf.Tensor: id=45, shape=(), dtype=float32, numpy=2.0>
tf.norm(b, ord=2, axis=1)
<tf.Tensor: id=51, shape=(2,), dtype=float32, numpy=array([1.4142135, 1.4142135], dtype=float32)>
tf.norm(b, ord=1)
<tf.Tensor: id=56, shape=(), dtype=float32, numpy=4.0>
# each column treated as a vector (norm over axis 0)
tf.norm(b, ord=1, axis=0)
<tf.Tensor: id=66, shape=(2,), dtype=float32, numpy=array([2., 2.], dtype=float32)>
# each row treated as a vector (norm over axis 1)
tf.norm(b, ord=1, axis=1)
<tf.Tensor: id=71, shape=(2,), dtype=float32, numpy=array([2., 2.], dtype=float32)>
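The max norm from the definitions above can be computed the same way; to my knowledge ord also accepts np.inf for this (a minimal sketch, assuming that):
import numpy as np
import tensorflow as tf

b = tf.ones([2, 2])
# max norm of each row: max_k |b_k|, which is 1.0 for a row of ones
tf.norm(b, ord=np.inf, axis=1)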
reduce_min/max/mean
- reduce: these ops can reduce the rank, e.g. taking the per-row max of a [2, 2] tensor yields shape [2]; see the keepdims sketch after the examples below
a = tf.random.normal([4, 10])
tf.reduce_min(a), tf.reduce_max(a), tf.reduce_mean(a)
(<tf.Tensor: id=80, shape=(), dtype=float32, numpy=-2.215113>,
<tf.Tensor: id=82, shape=(), dtype=float32, numpy=1.9458845>,
<tf.Tensor: id=84, shape=(), dtype=float32, numpy=-0.045550883>)
# min/max/mean of each row (axis=1)
tf.reduce_min(a, axis=1), tf.reduce_max(a, axis=1), tf.reduce_mean(a, axis=1)
(<tf.Tensor: id=98, shape=(4,), dtype=float32, numpy=array([-2.215113 , -1.5824796, -1.4861531, -1.3477703], dtype=float32)>,
<tf.Tensor: id=100, shape=(4,), dtype=float32, numpy=array([0.9380455, 1.1625607, 1.9458845, 1.492183 ], dtype=float32)>,
<tf.Tensor: id=102, shape=(4,), dtype=float32, numpy=array([-0.48791748, 0.25639585, 0.07420422, -0.02488617], dtype=float32)>)
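As noted above, reducing drops the axis. If you want to keep it (e.g. for later broadcasting), the reduce ops take a keepdims flag; a minimal sketch:
import tensorflow as tf

a = tf.random.normal([4, 10])
tf.reduce_max(a, axis=1).shape                 # TensorShape([4])
tf.reduce_max(a, axis=1, keepdims=True).shape  # TensorShape([4, 1])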
argmax/argmin
a.shape
TensorShape([4, 10])
tf.argmax(a).shape
TensorShape([10])
# returns indices, not values
tf.argmax(a)
<tf.Tensor: id=112, shape=(10,), dtype=int64, numpy=array([1, 1, 2, 3, 2, 1, 3, 1, 2, 1])>
# by default argmax/argmin act along the first dimension (axis=0)
tf.argmin(a).shape
TensorShape([10])
# with axis=1 they act along the second dimension
tf.argmin(a, axis=1).shape
TensorShape([4])
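A small side-by-side sketch (values chosen by me) of the difference between argmax and reduce_max: the former returns the position of the maximum, the latter the maximum itself:
import tensorflow as tf

x = tf.constant([[1., 5., 3.],
                 [7., 2., 4.]])
tf.argmax(x, axis=1)      # [1, 0]  -> column index of each row's max
tf.reduce_max(x, axis=1)  # [5., 7.] -> the max values themselves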
tf.equal
a = tf.constant([1, 2, 3, 2, 5])
b = tf.range(5)
tf.equal(a, b)
<tf.Tensor: id=186, shape=(5,), dtype=bool, numpy=array([False, False, False, False, False])>
res = tf.equal(a, b)
# cast True/False to 1/0, then sum up the matches
tf.reduce_sum(tf.cast(res, dtype=tf.int32))
<tf.Tensor: id=191, shape=(), dtype=int32, numpy=0>
Accuracy
a = tf.random.normal([2, 3])
a
<tf.Tensor: id=198, shape=(2, 3), dtype=float32, numpy=
array([[ 0.25201225, -1.3897187 , 0.29240564],
[-1.0671712 , 2.1487093 , 0.690736 ]], dtype=float32)>
pred = tf.cast(tf.argmax(a, axis=1), dtype=tf.int32)
pred.shape
TensorShape([2])
y = tf.constant([2, 1])
y
<tf.Tensor: id=163, shape=(2,), dtype=int32, numpy=array([2, 1], dtype=int32)>
tf.equal(y, pred)
<tf.Tensor: id=165, shape=(2,), dtype=bool, numpy=array([ True, True])>
correct = tf.reduce_sum(tf.cast(tf.equal(y, pred), dtype=tf.int32))
correct
<tf.Tensor: id=170, shape=(), dtype=int32, numpy=2>
correct / 2
<tf.Tensor: id=175, shape=(), dtype=float64, numpy=1.0>
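The same steps can be packed into a small helper function; a minimal sketch (the accuracy name and the use of tf.reduce_mean are mine, not from the notes):
import tensorflow as tf

def accuracy(logits, labels):
    # logits: [batch, num_classes], labels: [batch] integer class ids
    pred = tf.cast(tf.argmax(logits, axis=1), dtype=labels.dtype)
    correct = tf.cast(tf.equal(pred, labels), dtype=tf.float32)
    return tf.reduce_mean(correct)  # fraction of correct predictions

logits = tf.constant([[0.25, -1.39, 0.29],
                      [-1.07, 2.15, 0.69]])
labels = tf.constant([2, 1])
accuracy(logits, labels)  # 1.0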
tf.unique
- removes duplicate values
a = tf.range(5)
a
<tf.Tensor: id=235, shape=(5,), dtype=int32, numpy=array([0, 1, 2, 3, 4], dtype=int32)>
# returns the unique values y and, for each element, its index idx into y
tf.unique(a)
Unique(y=<tf.Tensor: id=237, shape=(5,), dtype=int32, numpy=array([0, 1, 2, 3, 4], dtype=int32)>, idx=<tf.Tensor: id=238, shape=(5,), dtype=int32, numpy=array([0, 1, 2, 3, 4], dtype=int32)>)
a = tf.constant([4, 2, 2, 4, 3])
a
<tf.Tensor: id=226, shape=(5,), dtype=int32, numpy=array([4, 2, 2, 4, 3], dtype=int32)>
res = tf.unique(a)
res
Unique(y=<tf.Tensor: id=228, shape=(3,), dtype=int32, numpy=array([4, 2, 3], dtype=int32)>, idx=<tf.Tensor: id=229, shape=(5,), dtype=int32, numpy=array([0, 1, 1, 0, 2], dtype=int32)>)
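The idx field lets you rebuild the original tensor from the unique values, e.g. with tf.gather; a short sketch of that round trip:
import tensorflow as tf

a = tf.constant([4, 2, 2, 4, 3])
res = tf.unique(a)
# index back into the unique values; recovers [4, 2, 2, 4, 3]
tf.gather(res.y, res.idx)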