TensorFlow 2.0 - CNN / Pretrained Models / RNN
Contents: 1. CNN Convolutional Neural Network / 2. Pretrained Models / 3. RNN Recurrent Neural Network
Based on: 简单粗暴 TensorFlow 2 (A Concise Handbook of TensorFlow 2)
1. CNN Convolutional Neural Network
Convolutional neural networks, and how to compute the output size after a convolution.
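For reference, the standard output-size formula (my addition, not spelled out in the original post): with input size n, kernel size k, padding p, and stride s,

    o = floor((n + 2p - k) / s) + 1

With padding='same' and stride 1 the spatial size is unchanged, and each MaxPool2D with pool_size=[2, 2] and strides=2 halves it: 28 -> 14 -> 7, which is where the 7 * 7 * 64 reshape in the code below comes from.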
tf.keras.layers.Conv2D, tf.keras.layers.MaxPool2D
import tensorflow as tf

class myCNN(tf.keras.Model):
    def __init__(self):
        super().__init__()
        # two conv + pool stages, then two dense layers
        self.conv1 = tf.keras.layers.Conv2D(filters=32, kernel_size=[5, 5],
                                            padding='same', activation='relu')
        self.pool1 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
        self.conv2 = tf.keras.layers.Conv2D(filters=64, kernel_size=[5, 5],
                                            padding='same', activation='relu')
        self.pool2 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
        self.flatten = tf.keras.layers.Reshape(target_shape=(7 * 7 * 64,))
        self.dense1 = tf.keras.layers.Dense(units=1024, activation='relu')
        self.dense2 = tf.keras.layers.Dense(units=10)

    def call(self, inputs):        # inputs: [batch, 28, 28, 1]
        x = self.conv1(inputs)     # [batch, 28, 28, 32]
        x = self.pool1(x)          # [batch, 14, 14, 32]
        x = self.conv2(x)          # [batch, 14, 14, 64]
        x = self.pool2(x)          # [batch, 7, 7, 64]
        x = self.flatten(x)        # [batch, 7 * 7 * 64]
        x = self.dense1(x)
        x = self.dense2(x)
        outputs = tf.nn.softmax(x) # class probabilities
        return outputs
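As a usage sketch (my addition, not from the original post; it assumes 28x28 grayscale MNIST inputs, which match the 7 * 7 * 64 reshape above), the model can be trained with Keras' built-in fit():

import tensorflow as tf

(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
x_train = x_train[..., tf.newaxis].astype('float32') / 255.0  # [60000, 28, 28, 1]

model = myCNN()
# the model already outputs softmax probabilities, so from_logits stays False
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=64, epochs=1)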
2. Pretrained Models
mymodel = tf.keras.applications.MobileNetV2() — built-in models such as VGG16, VGG19, ResNet, and MobileNet can be instantiated this way, and the network can be initialized with pretrained weights.
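The training code below builds MobileNetV2 from scratch (weights=None). For the pretrained path mentioned above, a hedged sketch might look like this; weights='imagenet' loads ImageNet-pretrained weights, and the Sequential head is my own illustrative addition:

import tensorflow as tf

# feature extractor initialized from pretrained ImageNet weights
backbone = tf.keras.applications.MobileNetV2(include_top=False,
                                             weights='imagenet',
                                             input_shape=(224, 224, 3))
backbone.trainable = False  # freeze pretrained weights; train only a new head
model = tf.keras.Sequential([
    backbone,
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(5, activation='softmax'),  # 5 tf_flowers classes
])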
import tensorflow as tf
import tensorflow_datasets as tfds

num_epoch = 2
batch_size = 16
learning_rate = 1e-3

version = tf.__version__
gpu_ok = tf.config.list_physical_devices('GPU')
print("tf version:", version, "\nuse GPU", gpu_ok)

# load tf_flowers, resize to MobileNetV2's 224x224 input, scale to [0, 1]
dataset = tfds.load("tf_flowers", split=tfds.Split.TRAIN, as_supervised=True)
dataset = dataset.map(
    lambda img, label: (tf.image.resize(img, (224, 224)) / 255.0, label)
).shuffle(1024).batch(batch_size)

model = tf.keras.applications.MobileNetV2(include_top=True, weights=None, classes=5)
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)

for e in range(num_epoch):
    for images, labels in dataset:
        with tf.GradientTape() as tape:
            pred = model(images, training=True)
            loss = tf.keras.losses.sparse_categorical_crossentropy(y_true=labels,
                                                                   y_pred=pred)
            loss = tf.reduce_mean(loss)
            print("loss: {}".format(loss.numpy()))
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(grads_and_vars=zip(grads, model.trainable_variables))
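After training, accuracy could be measured like this (a sketch of my own, not in the original; it reuses the same dataset object):

# hedged evaluation sketch using tf.keras.metrics
acc = tf.keras.metrics.SparseCategoricalAccuracy()
for images, labels in dataset:
    pred = model(images, training=False)
    acc.update_state(y_true=labels, y_pred=pred)
print("accuracy: {}".format(acc.result().numpy()))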
Note: if the dataset downloads slowly, I have uploaded it to CSDN for free download.
3. RNN Recurrent Neural Network
Data preprocessing: build the character set and the mappings between characters and indices, then fetch batch_size samples along with each sample's next character (the label).
import tensorflow as tf
import numpy as np

class Dataloader():
    def __init__(self):
        # download the Nietzsche corpus and build char <-> index mappings
        path = tf.keras.utils.get_file(
            'nietzsche.txt',
            origin='https://s3.amazonaws.com/text-datasets/nietzsche.txt')
        with open(path, encoding='utf-8') as f:
            self.raw_text = f.read().lower()
        self.chars = sorted(list(set(self.raw_text)))
        self.char_idx = dict((c, i) for i, c in enumerate(self.chars))
        self.idx_char = dict((i, c) for i, c in enumerate(self.chars))
        self.text = [self.char_idx[c] for c in self.raw_text]  # corpus as indices

    def get_batch(self, seq_len, batch_size):
        # sample batch_size random windows; the character right after
        # each window is its label
        seq = []
        next_char = []
        for i in range(batch_size):
            idx = np.random.randint(0, len(self.text) - seq_len)
            seq.append(self.text[idx: idx + seq_len])
            next_char.append(self.text[idx + seq_len])
        return np.array(seq), np.array(next_char)  # [batch, seq_len], [batch]
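A quick sanity check of the returned shapes (illustrative; the variable names are mine):

loader = Dataloader()
seq, next_char = loader.get_batch(seq_len=40, batch_size=4)
print(seq.shape, next_char.shape)  # (4, 40) (4,)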
Model definition, using tf.keras.layers.LSTMCell:
class myRNN(tf.keras.Model):
    def __init__(self, num_chars, batch_size, seq_len):
        super().__init__()
        self.num_chars = num_chars
        self.seq_len = seq_len
        self.batch_size = batch_size
        self.cell = tf.keras.layers.LSTMCell(units=256)
        self.dense = tf.keras.layers.Dense(units=self.num_chars)

    def call(self, inputs, from_logits=False):
        inputs = tf.one_hot(inputs, depth=self.num_chars)  # [batch, seq_len, num_chars]
        state = self.cell.get_initial_state(batch_size=self.batch_size,
                                            dtype=tf.float32)
        # unroll the LSTM cell one time step at a time
        for t in range(self.seq_len):
            output, state = self.cell(inputs[:, t, :], state)
        logits = self.dense(output)  # scores for the next character
        if from_logits:
            return logits
        else:
            return tf.nn.softmax(logits)

    def predict(self, inputs, temperature=1.0):
        batch_size, _ = tf.shape(inputs)
        logits = self(inputs, from_logits=True)
        # temperature scaling: lower temperature -> sharper distribution
        prob = tf.nn.softmax(logits / temperature).numpy()
        return np.array([np.random.choice(self.num_chars, p=prob[i, :])
                         for i in range(batch_size.numpy())])
num_batches = 1000
seq_len = 40
batch_size = 64
learning_rate = 1e-3

data_loader = Dataloader()
model = myRNN(num_chars=len(data_loader.chars), batch_size=batch_size,
              seq_len=seq_len)
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)

for i in range(num_batches):
    X, y = data_loader.get_batch(seq_len, batch_size)
    with tf.GradientTape() as tape:
        y_pred = model(X)
        loss = tf.keras.losses.sparse_categorical_crossentropy(y, y_pred)
        loss = tf.reduce_mean(loss)
        print("batch:{}, loss {}".format(i, loss.numpy()))
    grads = tape.gradient(loss, model.variables)
    optimizer.apply_gradients(zip(grads, model.variables))

# text generation: slide the window forward by appending each sampled character
X_, _ = data_loader.get_batch(seq_len, 1)
for diversity in [0.2, 0.5, 1.0, 1.2]:
    X = X_
    print('diversity {}'.format(diversity))
    for t in range(400):
        y_pred = model.predict(X, diversity)
        print(data_loader.idx_char[y_pred[0]], end='', flush=True)
        X = np.concatenate([X[:, 1:], np.expand_dims(y_pred, axis=1)], axis=-1)
    print("\n")
Output (text generated at each diversity setting):
diversity 0.2
the the sere the s
and the s
and the the sere the the s
and in the sere the ches the the sere the the sore the the s
and the the s
and the serend the seres the the the serely the the the s
all the the the s
and the the sere the the the sere the ther the the sorece the ninge sore the the the s of sell the pint the s the the the the the the the s of the serere the the s
and the sere the s the the t

diversity 0.5
ere
- - - and the ne ous bored bo s the the ande sereng to then hithe the the
he sesthard on the non there the mores sor the the thit fus the ches sored the seresit
and the the ntithe s at
all sent
for the fas theng the d end the ind che the the serangen
the
for ole the soll dund
, and chered
and the
pereropher of the resiged the the s lore
not the the s
as the s the dethere hor the s mone se soull

diversity 1.0
tyrive oop art rrame nd michicosentiun
, luind the trourd tho t ts
. cseseyreve oud s mhendgcomrools bored ere s oll ow ons
, here blprlen
, pforzede ntor
, in this mis je
, iof tore
. hon bf cerign then thect nene hfurlilin of fallll devety irtes" the whiy ins puncaliridut drerales alder
, as inen waveructache semaltou no aven it yuranty ahd oar
in - whe s urofeg to the
serecying
sicoradt
- i0nior anetheragl

diversity 1.2
y
, dpr thoucg
, "
, ind soncrea5sfporcul os_
; fac alin th thel
. ( owel
, thenniv poteer" hithichp
- hispin2
, ho thas d
- lher wrekek
- - - fe l seh rabf ssit afolyicud i iedy
, d
chendle
- hand
- a ne
lef urovut
, phetif po'n
. wskin ef
; phtors eve mdd ali
all
an icig tedt g main aisec cowstixgeof adt vinnd thas phinte
lllivita ou
is
toup tualy
as isscppomeofea2y
ie?y ounscded!wheor ome sllat
, hhe"se
, ouondibis
With a larger diversity (sampling temperature), the predicted characters are more varied.
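This follows from how predict divides the logits by the temperature before the softmax (standard temperature sampling, spelled out here for clarity): for logits z and temperature T,

    p_i = exp(z_i / T) / sum_j exp(z_j / T)

A larger T flattens the distribution toward uniform, so rarer characters get sampled more often; as T approaches 0 the sampling approaches argmax, which is why the diversity-0.2 sample keeps repeating high-probability fragments like "the".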