
Commit 5abdf51 (1 parent: ccda7bf)

add 三、TensorFlow的层次结构 ("3. The hierarchical structure of TensorFlow")

10 files changed: +2508 −5 lines

README.md (+3 −1)
```diff
@@ -1,6 +1,8 @@
 ## eat_tensorflow2_in_30_days_ipynb

-eat_tensorflow2_in_30_days_ipynb is the runnable Jupyter Notebook version of [《30天吃掉那只TensorFlow2.0》](https://github.com/lyhue1991/eat_tensorflow2_in_30_days). Thanks to the original author [lyhue1991](https://github.com/lyhue1991) for the tutorial.
+eat_tensorflow2_in_30_days_ipynb is the runnable Jupyter Notebook version of [《30天吃掉那只TensorFlow2.0》](https://github.com/lyhue1991/eat_tensorflow2_in_30_days).
+
+Thanks to the original author [lyhue1991](https://github.com/lyhue1991) for providing the original tutorial.

 ### 1、章节目录 ⏰
```
(new notebook file: +227 lines)
# 3-1 A demonstration of the low-level API

The example below uses TensorFlow's low-level API to implement a linear regression model.

The low-level API mainly consists of tensor operations, computational graphs, and automatic differentiation.
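All three ingredients fit in a few lines: tensor operations executed under a `tf.GradientTape` are recorded into a computational graph, and `tape.gradient` then differentiates through it. A minimal sketch (not one of the committed cells):

```python
import tensorflow as tf

x = tf.Variable(3.0)
with tf.GradientTape() as tape:
    y = x * x + 2.0 * x + 1.0      # tensor operations, recorded on the tape
dy_dx = tape.gradient(y, x)        # automatic differentiation: dy/dx = 2x + 2
print(dy_dx.numpy())               # 8.0 at x = 3
```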
```python
import tensorflow as tf

# Print a timestamped divider line
@tf.function
def printbar():
    ts = tf.timestamp()
    today_ts = ts % (24 * 60 * 60)

    hour = tf.cast(today_ts // 3600 + 8, tf.int32) % tf.constant(24)  # +8: UTC to Beijing time
    minute = tf.cast((today_ts % 3600) // 60, tf.int32)
    second = tf.cast(tf.floor(today_ts % 60), tf.int32)

    def timeformat(m):
        # Zero-pad single-digit components
        if tf.strings.length(tf.strings.format("{}", m)) == 1:
            return tf.strings.format("0{}", m)
        else:
            return tf.strings.format("{}", m)

    timestring = tf.strings.join([timeformat(hour), timeformat(minute),
                                  timeformat(second)], separator=":")
    tf.print("==========" * 8, end="")
    tf.print(timestring)
```
```python
# Number of samples
n = 400

# Generate a synthetic dataset for testing
X = tf.random.uniform([n, 2], minval=-10, maxval=10)
w0 = tf.constant([[2.0], [-1.0]])
b0 = tf.constant(3.0)
Y = X @ w0 + b0 + tf.random.normal([n, 1], mean=0.0, stddev=2.0)  # @ is matrix multiplication; Gaussian noise added
```
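Before fitting, it can help to eyeball the synthetic data. A quick sketch, assuming matplotlib is available (not part of the committed notebook):

```python
import matplotlib.pyplot as plt

# Scatter each feature against the target to confirm the linear trend plus noise
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.scatter(X[:, 0], Y[:, 0], s=5)
ax1.set_xlabel("x1"); ax1.set_ylabel("y")
ax2.scatter(X[:, 1], Y[:, 0], s=5)
ax2.set_xlabel("x2"); ax2.set_ylabel("y")
plt.show()
```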
```python
# Train with the dynamic (eager) graph, which is easy to debug

w = tf.Variable(tf.random.normal(w0.shape))
b = tf.Variable(0.0)

def train(epochs):
    for epoch in tf.range(1, epochs + 1):
        with tf.GradientTape() as tape:
            # Forward pass: compute the loss
            Y_hat = X @ w + b
            loss = tf.squeeze(tf.transpose(Y - Y_hat) @ (Y - Y_hat) / (2.0 * n))
            # tf.squeeze returns a tensor of the same type with all size-1 dimensions removed.
        # Backward pass: compute the gradients
        dloss_dw, dloss_db = tape.gradient(loss, [w, b])
        # Update the parameters by gradient descent
        w.assign(w - 0.001 * dloss_dw)
        b.assign(b - 0.001 * dloss_db)
        if epoch % 1000 == 0:
            printbar()
            tf.print("epoch =", epoch, "; loss =", loss)
            tf.print("w =", w)
            tf.print("b =", b)
            tf.print("")

train(5000)
```

Output:

```
================================================================================22:49:58
epoch = 1000 ; loss = 2.70946407
w = [[1.98515093]
 [-1.03169119]]
b = 2.02254653

================================================================================22:50:00
epoch = 2000 ; loss = 2.1017797
w = [[1.99002635]
 [-1.02539611]]
b = 2.77152085

================================================================================22:50:01
epoch = 3000 ; loss = 2.01909518
w = [[1.99182439]
 [-1.02307415]]
b = 3.04779363

================================================================================22:50:02
epoch = 4000 ; loss = 2.00784397
w = [[1.99248791]
 [-1.02221823]]
b = 3.14970684

================================================================================22:50:03
epoch = 5000 ; loss = 2.00631309
w = [[1.99273252]
 [-1.02190089]]
b = 3.18729782
```
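For reference, with $e = Y - \hat{Y}$ the scalar being minimized is the half mean squared error, and the gradients the tape returns are the familiar closed forms:

$$
L = \frac{1}{2n}\, e^\top e, \qquad
\frac{\partial L}{\partial w} = -\frac{1}{n}\, X^\top e, \qquad
\frac{\partial L}{\partial b} = -\frac{1}{n} \sum_{i=1}^{n} e_i .
$$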
```python
## Use the autograph mechanism to convert to a static graph for speed
w = tf.Variable(tf.random.normal(w0.shape))
b = tf.Variable(0.0)

@tf.function
def train(epochs):
    for epoch in tf.range(1, epochs + 1):
        with tf.GradientTape() as tape:
            # Forward pass: compute the loss
            Y_hat = X @ w + b
            loss = tf.squeeze(tf.transpose(Y - Y_hat) @ (Y - Y_hat)) / (2.0 * n)
        # Backward pass: compute the gradients
        dloss_dw, dloss_db = tape.gradient(loss, [w, b])
        # Update the parameters by gradient descent
        w.assign(w - 0.001 * dloss_dw)
        b.assign(b - 0.001 * dloss_db)
        if epoch % 1000 == 0:
            printbar()
            tf.print("epoch =", epoch, " loss =", loss)
            tf.print("w =", w)
            tf.print("b =", b)
            tf.print("")

train(5000)
```

Output:

```
================================================================================22:50:15
epoch = 1000 loss = 2.70483422
w = [[1.98517644]
 [-1.03165841]]
b = 2.02645826

================================================================================22:50:15
epoch = 2000 loss = 2.10114884
w = [[1.99003589]
 [-1.02538383]]
b = 2.77296615

================================================================================22:50:15
epoch = 3000 loss = 2.0190084
w = [[1.99182796]
 [-1.02306986]]
b = 3.04833055

================================================================================22:50:15
epoch = 4000 loss = 2.00783205
w = [[1.9924891]
 [-1.02221668]]
b = 3.14990139

================================================================================22:50:16
epoch = 5000 loss = 2.00631142
w = [[1.992733]
 [-1.0219003]]
b = 3.18736935
```
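The timestamps already hint at the speedup: the eager loop logs roughly one second per 1000 epochs, while the traced graph finishes nearly all 5000 within a second. To measure it directly one could time the two variants; a rough sketch (`train_eager` and `train_graph` are hypothetical names for the undecorated and `@tf.function` loops above, and results vary by machine):

```python
import time

def timed(fn, *args):
    # Wall-clock a single call
    start = time.time()
    fn(*args)
    return time.time() - start

# The first graph call includes a one-time tracing cost; later calls reuse the graph.
print("eager: %.2f s" % timed(train_eager, 5000))
print("graph: %.2f s" % timed(train_graph, 5000))
```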
(Notebook metadata: Python 3 kernel, Python 3.7.2, nbformat 4.)
