
Problem building a network with TensorFlow

Bounty: 100 garden beans [unresolved question]

Hi experts, I've run into a problem while building my own neural network and I don't know how to solve it. Please help!

  I have two sets of data, A and B, which together correspond to one set of labels Y. I feed A into a CNN and B into an LSTM, merge the CNN and LSTM outputs into C, and feed C into a fully connected layer. The fully connected layer's output is the prediction; I compute the loss from the prediction and Y and let an optimizer minimize it to train the whole network.
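
In TensorFlow terms, the "merge into C" step is just a concatenation of the two branch outputs along the feature axis before the fully connected layer. As a rough illustration of what I mean (the shapes and names here are only examples, not the ones in my real code below):

import tensorflow as tf
tf.compat.v1.disable_eager_execution()

# stand-ins for the two branch outputs, each of shape [batch, features]
cnn_out = tf.compat.v1.placeholder(tf.float32, [None, 64])    # output of the CNN branch
lstm_out = tf.compat.v1.placeholder(tf.float32, [None, 20])   # output of the LSTM branch

merged = tf.concat([cnn_out, lstm_out], axis=1)               # C: [batch, 64 + 20]
prediction = tf.compat.v1.layers.dense(merged, 1)             # fully connected layer -> prediction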

  But the network won't train, and I couldn't find an answer to the error online. If you know where the problem is, please help me out. Many thanks!!!

  Here is my code:

import tensorflow as tf
from PIL import Image
import tf_slim as slim
import os
import sys
import numpy as np
import scipy.io as sio
import random
tf.compat.v1.disable_eager_execution()  # make sure sess.run() works (graph mode)

def CNN_3D(inputs):
    # CNN branch: a few conv + max-pool layers, flattened at the end
    with slim.arg_scope([slim.conv2d],
                        weights_initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
                        weights_regularizer=slim.l2_regularizer(0.0005),
                        biases_initializer=tf.constant_initializer(0.1)):
        with slim.arg_scope([slim.avg_pool2d, slim.max_pool2d],
                            padding='VALID'):
            net1 = slim.conv2d(inputs, 10, [3, 3], stride=2, padding='same', scope='conv1')
            net2 = slim.conv2d(net1, 5, [3, 3], stride=1, padding='VALID', scope='conv2')
            net3 = slim.max_pool2d(net2, [3, 3], stride=1, scope='max_pool1')
            net4 = slim.conv2d(net3, 3, [3, 3], stride=1, padding='VALID', scope='conv3')
            net5 = slim.max_pool2d(net4, [1, 3], stride=1, scope='max_pool2')
            output = slim.flatten(net5)
    return output

def ful_connec(inputs):
    with slim.arg_scope([slim.fully_connected],
                        weights_initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
                        weights_regularizer=slim.l2_regularizer(0.0005),
                        biases_initializer=tf.constant_initializer(0.1)):
        f_net1 = slim.fully_connected(inputs, 20, activation_fn=tf.nn.sigmoid)
        f_net2 = slim.fully_connected(f_net1, 10, activation_fn=tf.nn.sigmoid)
        f_net3 = slim.fully_connected(f_net2, 1)
    return f_net3

def radardata_lstm(inputs, n_hidden):
    # LSTM branch; returns the output at the last time step
    cell = tf.compat.v1.nn.rnn_cell.LSTMCell(n_hidden)
    # inputs: [batch_size, max_time, n_inputs]; n_inputs is the number of features per time step
    outputs, final_state = tf.compat.v1.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    return outputs[:, -1, :]

def _get_filenames_and_classes(data_file):  # data_file: directory containing the .mat data files
    mat_filenames = []
    for filename in os.listdir(data_file):
        # build the absolute path of each file
        path = os.path.join(data_file, filename)
        mat_filenames.append(path)
    return mat_filenames

# create the placeholders

x_1 = tf.compat.v1.placeholder(tf.float32, [16, 31, 35])
x_2 = tf.compat.v1.placeholder(tf.float32, [None, 512])
y_1 = tf.compat.v1.placeholder(tf.float32, [None, 1])
y_2 = tf.compat.v1.placeholder(tf.float32, [None, 1])
CNN_3D_output_batch = tf.compat.v1.placeholder(tf.int32)

# path to the .mat data files

mat_position = mat_file = 'D:/Deeplearning_model_by_myself/net_test_data_set/'

# collect the .mat file paths

mat_filenames = _get_filenames_and_classes(mat_position)
with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.local_variables_initializer())
    sess.run(tf.compat.v1.global_variables_initializer())
    X = x_1[np.newaxis, :]
    CNN_3D_output = CNN_3D(X)
    # CNN_3D_output_1 repeats CNN_3D_output along the batch axis so it can be
    # concatenated with lstm_output
    CNN_3D_output_1 = tf.tile(input=CNN_3D_output, multiples=[CNN_3D_output_batch, 1])
    X_2 = x_2[:, :, np.newaxis]
    lstm_output = radardata_lstm(X_2, 20)
    full_connect_input = tf.concat([CNN_3D_output_1, lstm_output], 1)
    full_connect_output = ful_connec(full_connect_input)
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=full_connect_output, labels=y_1))
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.1).minimize(loss)
    accuracy = tf.reduce_mean(tf.square(y_1 - full_connect_output))
    # start training
    for i in range(10):
        # load the data from each .mat file
        for j, filename in enumerate(mat_filenames):
            electron_density = sio.loadmat(filename)['electron_density']
            radar_data = sio.loadmat(filename)['radar_data']
            wind_speed = sio.loadmat(filename)['wind_speed']
            wind_direction = sio.loadmat(filename)['wind_direction']  # not used below
            batch_size = wind_speed.shape[0]
            sess.run(optimizer, feed_dict={x_1: electron_density, x_2: radar_data,
                                           y_1: wind_speed, CNN_3D_output_batch: batch_size})
            acc, loss_ = sess.run([accuracy, loss], feed_dict={x_1: electron_density,
                                                               x_2: radar_data,
                                                               y_1: wind_speed,
                                                               CNN_3D_output_batch: batch_size})
            print("Iter:%d Loss:%.3f Accuracy:%.2f" % (i, loss_, acc))

The error reported is:
tensorflow.python.framework.errors_impl.FailedPreconditionError: Could not find variable beta1_power. This could mean that the variable has been deleted. In TF1, it can also mean the variable is uninitialized. Debug info: container=localhost, status=Not found: Container localhost does not exist. (Could not find resource: localhost/beta1_power)
[[node Adam/update_fully_connected_1/weights/ResourceApplyAdam/ReadVariableOp (defined at <input>:113) ]]
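
For context on the message: beta1_power is one of the slot variables that tf.compat.v1.train.AdamOptimizer creates inside minimize(). In the script above the two initializers are run before CNN_3D(), radardata_lstm() and minimize() have added their variables to the graph, which would match this error; that is only my reading of the message, not a verified diagnosis. A minimal sketch of the usual build-the-graph-first, initialize-afterwards ordering, with toy names rather than the ones above:

import tensorflow as tf
tf.compat.v1.disable_eager_execution()

# toy graph: build everything, including the optimizer, first ...
x = tf.compat.v1.placeholder(tf.float32, [None, 10])
y = tf.compat.v1.placeholder(tf.float32, [None, 1])
pred = tf.compat.v1.layers.dense(x, 1)                              # creates the layer's variables
loss = tf.reduce_mean(tf.square(pred - y))
train_op = tf.compat.v1.train.AdamOptimizer(0.01).minimize(loss)    # creates beta1_power / beta2_power

# ... and only then create and run the initializer
init = tf.compat.v1.global_variables_initializer()
with tf.compat.v1.Session() as sess:
    sess.run(init)  # all variables, including Adam's slot variables, now exist and are initialized
    # sess.run(train_op, feed_dict={...}) can be called from here on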

大佬救命啊 | Beginner, level 1 | Garden beans: 102
Asked on: 2021-09-10 13:21