|
16 | 16 | "metadata": {},
17 | 17 | "outputs": [],
18 | 18 | "source": [
19 | | - "#>>>>>>>>>>>>>>>>>1、基础知识>>>>>>>>>>>>>>>>>>>>>>>>>\n", |
| 19 | + "### 1.1.1 基础知识\n", |
20 | 20 | "#张量是具有统一类型(称为 dtype)的多维数组.您可以在 tf.dtypes.DType 中查看所有支持的 dtypes。\n", |
21 | 21 | "#如果您熟悉 NumPy,就会知道张量与 np.arrays 有一定的相似性。\n", |
22 | 22 | "#就像 Python 数值和字符串一样,所有张量都是不可变的:永远无法更新张量的内容,只能创建新的张量。\n" |
|
36 | 36 | } |
37 | 37 | ], |
38 | 38 | "source": [ |
39 | | - "#下面是一个“标量”(或称“0 秩”张量)。标量包含单个值,但没有“轴”。\n", |
| 39 | + "##下面是一个“标量”(或称“0 秩”张量)。标量包含单个值,但没有“轴”。\n", |
40 | 40 | "# This will be an int32 tensor by default; see \"dtypes\" below.\n", |
41 | 41 | "rank_0_tensor = tf.constant(4)\n", |
42 | 42 | "print(rank_0_tensor)" |
|
56 | 56 | } |
57 | 57 | ], |
58 | 58 | "source": [ |
59 | | - "#“向量”(或称“1 秩”张量)就像一个值列表。向量有 1 个轴:\n", |
| 59 | + "##“向量”(或称“1 秩”张量)就像一个值列表。向量有 1 个轴:\n", |
60 | 60 | "# Let's make this a float tensor.\n", |
61 | 61 | "rank_1_tensor = tf.constant([2.0,3.0,4.0])\n", |
62 | 62 | "print(rank_1_tensor)" |
|
79 | 79 | } |
80 | 80 | ], |
81 | 81 | "source": [ |
82 | | - "#“矩阵”(或称“2 秩”张量)有 2 个轴:\n", |
| 82 | + "##“矩阵”(或称“2 秩”张量)有 2 个轴:\n", |
83 | 83 | "# If you want to be specific, you can set the dtype (see below) at creation time\n", |
84 | 84 | "rank_2_tensor = tf.constant(\n", |
85 | 85 | " [[1,2],\n", |
|
243 | 243 | } |
244 | 244 | ], |
245 | 245 | "source": [ |
246 | | - "#各种运算都可以使用张量。\n", |
| 246 | + "##各种运算都可以使用张量。\n", |
247 | 247 | "#tf.reduce_max 是 TensorFlow 中的一个函数,用于计算张量(tensor)中的最大值。它可以沿着张量的一个或多个维度进行操作。\n", |
248 | 248 | "a = tf.constant([\n", |
249 | 249 | " [1,2,3,4,5],\n", |
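The hunk cuts the cell off after the first row of `a`; the sketch below adds an assumed second row purely to show the `axis` argument described above, so the printed values are illustrative, not the notebook's actual output:

```python
import tensorflow as tf

a = tf.constant([[1, 2, 3, 4, 5],
                 [6, 7, 8, 9, 10]])   # second row assumed for illustration

print(tf.reduce_max(a))               # global maximum -> 10
print(tf.reduce_max(a, axis=0))       # per-column maxima -> [ 6  7  8  9 10]
print(tf.reduce_max(a, axis=1))       # per-row maxima    -> [ 5 10]
```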
|
326 | 326 | } |
327 | 327 | ], |
328 | 328 | "source": [ |
329 | | - "#>>>>>>>>>>>>>>>>>2、SHAPE>>>>>>>>>>>>>>>>>>>>>>>>>\n", |
| 329 | + "### 1.1.2 SHAPE\n", |
330 | 330 | "\n", |
331 | 331 | "##张量有形状。下面是几个相关术语:\n", |
332 | 332 | "\n", |
|
440 | 440 | } |
441 | 441 | ], |
442 | 442 | "source": [ |
443 | | - "#>>>>>>>>>>>>>>>>>3、索引>>>>>>>>>>>>>>>>>>>>>>>>>\n", |
444 | | - "#单轴索引\n", |
| 443 | + "### 1.1.3 索引\n", |
| 444 | + "##单轴索引\n", |
445 | 445 | "#TensorFlow 遵循标准 Python 索引编制规则(类似于在 Python 中为列表或字符串编制索引)以及 NumPy 索引编制的基本规则。\n", |
446 | 446 | "#索引从 0 开始编制\n", |
447 | 447 | "#负索引表示按倒序编制索引\n", |
|
483 | 483 | } |
484 | 484 | ], |
485 | 485 | "source": [ |
486 | | - "#多轴索引\n", |
| 486 | + "##多轴索引\n", |
487 | 487 | "#更高秩的张量通过传递多个索引来编制索引。\n", |
488 | 488 | "\n", |
489 | 489 | "#对于高秩张量的每个单独的轴,遵循与单轴情形完全相同的规则。\n", |
|
513 | 513 | } |
514 | 514 | ], |
515 | 515 | "source": [ |
516 | | - "#>>>>>>>>>>>>>>>>>4、reshape>>>>>>>>>>>>>>>>>>>>>>>>>\n", |
517 | | - "#改变张量的形状很有用\n", |
| 516 | + "### 1.1.4 reshape\n", |
| 517 | + "##改变张量的形状很有用\n", |
518 | 518 | "# Shape returns a `TensorShape` object that shows the size along each axis\n", |
519 | 519 | "x = tf.constant([[1],[2],[3]])\n", |
520 | 520 | "print(x,\"\\n\")\n", |
|
691 | 691 | } |
692 | 692 | ], |
693 | 693 | "source": [ |
694 | | - "#>>>>>>>>>>>>>>>>>5、DTypes>>>>>>>>>>>>>>>>>>>>>>>>>\n", |
| 694 | + "### 1.1.5 DTypes\n", |
695 | 695 | "#使用 Tensor.dtype 属性可以检查 tf.Tensor 的数据类型。\n", |
696 | 696 | "#从 Python 对象创建 tf.Tensor 时,您可以选择指定数据类型。\n", |
697 | 697 | "#如果不指定,TensorFlow 会选择一个可以表示您的数据的数据类型。TensorFlow 将 Python 整数转换为 tf.int32,\n", |
|
723 | 723 | } |
724 | 724 | ], |
725 | 725 | "source": [ |
726 | | - "#>>>>>>>>>>>>>>>>>6、广播>>>>>>>>>>>>>>>>>>>>>>>>>\n", |
727 | | - "#广播是从 NumPy 中的等效功能借用的一个概念。简而言之,在一定条件下,对一组张量执行组合运算时,为了适应大张量,会对小张量进行“扩展”。\n", |
| 726 | + "### 1.1.6 广播\n", |
| 727 | + "#广播是从 NumPy 中的等效功能借用的一个概念。简而言之,在一定条件下,对一组张量执行组合运算时,\n", |
| 728 | + "#为了适应大张量,会对小张量进行“扩展”。\n", |
728 | 729 | "\n", |
729 | 730 | "#最简单和最常见的例子是尝试将张量与标量相乘或相加。在这种情况下会对标量进行广播,使其变成与其他参数相同的形状。\n", |
730 | 731 | "x = tf.constant([1,2,3])\n", |
|
824 | 825 | } |
825 | 826 | ], |
826 | 827 | "source": [ |
827 | | - "#>>>>>>>>>>>>>>>>>7、tf.convert_to_tensor>>>>>>>>>>>>>>>>>>>>>>>>>\n", |
| 828 | + "### 1.1.7 tf.convert_to_tensor\n", |
828 | 829 | "#大部分运算(如 tf.matmul 和 tf.reshape)会使用 tf.Tensor 类的参数。不过,在上面的示例中,\n", |
829 | 830 | "# 您会发现形状类似于张量的 Python 对象也可以接受。\n", |
830 | 831 | "\n", |
|
843 | 844 | "metadata": {}, |
844 | 845 | "outputs": [], |
845 | 846 | "source": [ |
846 | | - "#>>>>>>>>>>>>>>>>>8、不规则张量>>>>>>>>>>>>>>>>>>>>>>>>>\n", |
| 847 | + "### 1.1.8 不规则张量\n", |
847 | 848 | "#如果张量的某个轴上的元素个数可变,则称为“不规则”张量。对于不规则数据,请使用 tf.ragged.RaggedTensor。\n", |
848 | 849 | "#例如,下面的例子无法用规则张量表示:\n" |
849 | 850 | ] |
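The example itself is outside this hunk; a minimal sketch of a ragged tensor built from rows of unequal length (the values are assumed):

```python
import tensorflow as tf

# Rows of different lengths cannot be packed into a regular tf.Tensor...
ragged_list = [[0, 1, 2, 3],
               [4, 5],
               [6, 7, 8],
               [9]]

# ...but tf.ragged.RaggedTensor stores them directly.
ragged_tensor = tf.ragged.constant(ragged_list)
print(ragged_tensor)
print(ragged_tensor.shape)  # (4, None): the ragged axis has no fixed length
```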
|
944 | 945 | } |
945 | 946 | ], |
946 | 947 | "source": [ |
947 | | - "#>>>>>>>>>>>>>>>>>9、字符串张量>>>>>>>>>>>>>>>>>>>>>>>>>\n", |
| 948 | + "### 1.1.9 字符串张量\n", |
948 | 949 | "#tf.string 是一种 dtype,也就是说,在张量中,您可以用字符串(可变长度字节数组)来表示数据。\n", |
949 | 950 | "\n", |
950 | 951 | "#字符串是原子类型,无法像 Python 字符串一样编制索引。字符串的长度并不是张量的一个轴。\n", |
|
1117 | 1118 | "metadata": {}, |
1118 | 1119 | "outputs": [], |
1119 | 1120 | "source": [ |
1120 | | - "#>>>>>>>>>>>>>>>>>10、稀疏张量>>>>>>>>>>>>>>>>>>>>>>>>>\n", |
| 1121 | + "### 1.1.10 稀疏张量\n", |
1121 | 1122 | "#在某些情况下,数据很稀疏,比如说在一个非常宽的嵌入空间中。\n", |
1122 | 1123 | "# 为了高效存储稀疏数据,TensorFlow 支持 tf.sparse.SparseTensor 和相关运算。" |
1123 | 1124 | ] |
|
1185 | 1186 | "# You can convert sparse tensors to dense\n", |
1186 | 1187 | "tf.sparse.to_dense(sparse_tensor)\n" |
1187 | 1188 | ] |
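The construction of `sparse_tensor` is not shown in these hunks; a minimal sketch of how it is presumably built before the `tf.sparse.to_dense` call above (indices and values are assumed):

```python
import tensorflow as tf

# A SparseTensor stores only the non-zero entries by index, which is
# efficient when most of a (possibly very wide) tensor is zero.
sparse_tensor = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]],
                                       values=[1, 2],
                                       dense_shape=[3, 4])
print(sparse_tensor)

# Convert to dense to see the zeros filled in.
print(tf.sparse.to_dense(sparse_tensor))
```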
1188 | | - }, |
1189 | | - { |
1190 | | - "cell_type": "code", |
1191 | | - "execution_count": null, |
1192 | | - "metadata": {}, |
1193 | | - "outputs": [], |
1194 | | - "source": [] |
1195 | 1189 | } |
1196 | 1190 | ], |
1197 | 1191 | "metadata": { |
|