final37 main branch: because the last two coordinates are mostly equal, the last coordinate was dropped.
Method 1: discriminator reshape(-1,1024)
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from six.moves import xrange

data = np.load('data/final37端点补充相等值.npy')
print(data.shape)
data = data[:, :, 0:60]   # keep only the first 60 values per branch (drop the duplicated endpoint)
print(data.shape)
np.set_printoptions(threshold=np.inf)
# Plot the original data: each figure is a 10x10 grid of samples.
def Show_images(data, show_nums, save=False):
    index = 0
    for n in range(show_nums):
        show_images = data[index:index+100]
        show_images = show_images.reshape(100, 3, 60, 1)
        r, c = 10, 10
        fig, axs = plt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                xy = show_images[cnt]
                for k in range(len(xy)):
                    x = xy[k][0:30]
                    y = xy[k][30:60]
                    if k == 0:
                        axs[i, j].plot(x, y, color='blue', linewidth=2)    # main branch
                    if k == 1:
                        axs[i, j].plot(x, y, color='red', linewidth=2)     # left branch
                    if k == 2:
                        axs[i, j].plot(x, y, color='green', linewidth=2)   # right branch
                axs[i, j].axis('off')
                cnt += 1
        index += 100
        if save:
            if not os.path.exists('gen4'):
                os.makedirs('gen4')
            fig.savefig('gen4/%d.jpg' % n)
            plt.close()
        else:
            plt.show()
def Save_genImages(gen, epoch):
    r, c = 10, 10
    fig, axs = plt.subplots(r, c)
    cnt = 0
    for i in range(r):
        for j in range(c):
            xy = gen[cnt]
            for k in range(len(xy)):
                x = xy[k][0:30]
                y = xy[k][30:60]
                if k == 0:
                    axs[i, j].plot(x, y, color='blue')
                if k == 1:
                    axs[i, j].plot(x, y, color='red')
                if k == 2:
                    axs[i, j].plot(x, y, color='green')
            axs[i, j].axis('off')
            cnt += 1
    if not os.path.exists('Manage0'):
        os.makedirs('Manage0')
    fig.savefig('Manage0/%d.jpg' % epoch)
    plt.close()
def Manage_gen(gen_imgs):
    # gen_imgs is an array of shape (-1, 3, 60). In the generated samples the tail of the main
    # branch is detached from the heads of the left and right branches; this function re-attaches
    # them while keeping the dimensions unchanged.
    gen_imgs = gen_imgs.reshape(-1, 3, 60)
    finaldata = gen_imgs.tolist()
    final = []
    for i in range(len(finaldata)):
        zhu = finaldata[i][0]   # main branch
        zuo = finaldata[i][1]   # left branch
        you = finaldata[i][2]   # right branch
        # Split each branch into separate x and y lists.
        zhu_x = zhu[0:30]
        zhu_y = zhu[30:60]
        zuo_x = zuo[0:30]
        zuo_y = zuo[30:60]
        you_x = you[0:30]
        you_y = you[30:60]
        ############################################
        # In the real data the last two points of the main branch are essentially equal, and the
        # generated data mimics this; when computing angles later, the last point and the third
        # point from the end should therefore be used.
        # To prepend the main branch's endpoint to the left and right branches, first drop the
        # last point of each of those branches (the padded endpoint values are equal, so removing
        # one has little effect), then insert the main branch's tail at their heads. This keeps
        # the dimensions unchanged.
        # Drop the last point of the left and right branches.
        del zuo_x[-1]
        del zuo_y[-1]
        del you_x[-1]
        del you_y[-1]
        # Insert the main branch's tail at the head of the left and right branches.
        zuo_x.insert(0, zhu_x[-1])
        zuo_y.insert(0, zhu_y[-1])
        you_x.insert(0, zhu_x[-1])
        you_y.insert(0, zhu_y[-1])
        zhu_x.extend(zhu_y)
        zuo_x.extend(zuo_y)
        you_x.extend(you_y)
        fencha = [zhu_x] + [zuo_x] + [you_x]
        final.append(fencha)
    final = np.array(final)   # array of shape (-1, 3, 60)
    return final
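# A minimal sanity check for Manage_gen (hypothetical helper, define-only, assumes only numpy):
# it verifies that the (-1, 3, 60) shape is preserved and that the left/right branches now start
# at the main branch's endpoint. Call it manually if desired.
def _demo_manage_gen():
    demo = np.random.rand(5, 3, 60).astype(np.float32)
    fixed = Manage_gen(demo)
    print(fixed.shape)                        # (5, 3, 60) -- dimensions unchanged
    print(fixed[0, 1, 0] == demo[0, 0, 29])   # left branch head x == main branch tail x
    print(fixed[0, 2, 30] == demo[0, 0, 59])  # right branch head y == main branch tail y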
def Save_lossValue(epoch, iters, d_loss, g_loss):
    with open('loss3.txt', 'a') as f:
        f.write("epoch %d, batch %d, d_loss: %.8f, g_loss: %.8f" % (epoch, iters, d_loss, g_loss) + '\n')
def plot_loss(loss):
    fig, ax = plt.subplots(figsize=(20, 7))
    losses = np.array(loss)
    plt.plot(losses.T[0], label="Discriminator Loss")
    plt.plot(losses.T[1], label="Generator Loss")
    plt.title("Training Losses")
    plt.legend()
    plt.savefig('loss4.jpg')
    plt.show()
# ReLU activation
def Relu(name, tensor):
    return tf.nn.relu(tensor, name)

# LeakyReLU activation
def LeakyRelu(x, alpha=0.25):
    return tf.maximum(x, alpha * x)

# Fully connected layer
def Fully_connected(name, value, output_shape):
    with tf.variable_scope(name, reuse=None) as scope:
        shape = value.get_shape().as_list()
        w = tf.get_variable('w', [shape[1], output_shape], dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=0.01))
        b = tf.get_variable('b', [output_shape], dtype=tf.float32, initializer=tf.constant_initializer(0.0))
        return tf.matmul(value, w) + b
def Get_inputs(real_size, noise_size):
    real_img = tf.placeholder(tf.float32, [None, real_size], name='real_img')
    noise_img = tf.placeholder(tf.float32, [None, noise_size], name='noise_img')
    return real_img, noise_img

def Get_noise(noise, batch_size):
    # Note: noise_size here is the global defined further below, not a parameter of this function.
    if noise == 'uniform':
        batch_noise = np.random.uniform(-1, 1, size=(batch_size, noise_size))
    elif noise == 'normal':
        batch_noise = np.random.normal(-1, 1, size=(batch_size, noise_size))
    elif noise == 'normal0_1':
        batch_noise = np.random.normal(0, 1, size=(batch_size, noise_size))
    return batch_noise
def Discriminator(img, reuse=False, name='discriminator'):
    with tf.variable_scope(name, reuse=reuse):
        output = Fully_connected('df1', img, 1024)
        output = LeakyRelu(output)
        # output = Fully_connected('df2', output, 512)
        # output = LeakyRelu(output)
        output = Fully_connected('df3', output, 1024)
        output = LeakyRelu(output)
        # output = Fully_connected('df4', output, 256)
        # output = LeakyRelu(output)
        # output = Fully_connected('df6', output, 1024)
        # output = LeakyRelu(output)
        #
        # output = Fully_connected('df5', output, 1)
        prob = tf.sigmoid(output)
        return output, prob
def Generator(noise_img, reuse=False, name='generator'):
    with tf.variable_scope(name, reuse=reuse):
        output = Fully_connected('gf1', noise_img, 1024)
        output = tf.layers.batch_normalization(output, momentum=0.9, training=True)
        output = LeakyRelu(output)
        output = Fully_connected('gf2', output, 1024)
        output = tf.layers.batch_normalization(output, momentum=0.9, training=True)
        output = LeakyRelu(output)
        # output = Fully_connected('gf3', output, 512)
        # output = tf.layers.batch_normalization(output, momentum=0.9, training=True)
        # output = LeakyRelu(output)
        output = Fully_connected('gf4', output, 512)
        output = tf.layers.batch_normalization(output, momentum=0.9, training=True)
        output = tf.nn.relu(output)
        # output = Fully_connected('gf6', output, 256)
        # output = tf.layers.batch_normalization(output, momentum=0.9, training=True)
        # output = tf.nn.relu(output)
        output = Fully_connected('gf5', output, 180)
        output = tf.nn.tanh(output)
        return output
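# Note: the generator's tanh output lies in [-1, 1]. In Train() below, real data in [0, 1] is
# rescaled with x*2-1 (when noise != 'normal0_1') before being fed to the discriminator, and
# generated samples are mapped back with (g+1)/2 before plotting.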
mode = 'gan'        # gan, wgan, wgan-gp
noise = 'uniform'   # normal0_1, normal, uniform
batch_size = 100
epochs = 10
n_sample = 100
lamda = 10          # gradient-penalty weight for wgan-gp
img_size = 180      # 3 branches x 60 values
noise_size = 100
tf.reset_default_graph()
real_img, noise_img = Get_inputs(img_size, noise_size)   # fed via feed_dict
real_data = real_img
fake_data = Generator(noise_img)
disc_real, disc_prob_real = Discriminator(real_data, reuse=False)
disc_fake, disc_prob_fake = Discriminator(fake_data, reuse=True)
# Trainable variables of the generator and the discriminator
train_vars = tf.trainable_variables()
g_vars = [var for var in train_vars if var.name.startswith("generator")]
d_vars = [var for var in train_vars if var.name.startswith("discriminator")]
# Vanilla GAN
if mode == 'gan':
    # First form of the generator loss: the better the discriminator, the more easily the
    # generator's gradients vanish; at the optimum the discriminator assigns probability 0.5
    # to both real and fake samples.
    gen_cost = tf.reduce_mean(tf.log(1 - disc_prob_fake))
    disc_cost = -tf.reduce_mean(tf.log(disc_prob_real) + tf.log(1 - disc_prob_fake))
    # Second form of the generator loss: prone to mode collapse and unstable gradients.
    # disc_cost = tf.reduce_mean(-tf.log(disc_prob_real) - tf.log(1 - disc_prob_fake))
    # gen_cost = tf.reduce_mean(-tf.log(disc_prob_fake))
    # gen_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake, labels=tf.ones_like(disc_fake)))   # generator loss
    # disc_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake, labels=tf.zeros_like(disc_fake)))
    # disc_cost += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_real, labels=tf.ones_like(disc_real)))
    # disc_cost /= 2.   # discriminator loss
    # Optimizers
    gen_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5).minimize(gen_cost, var_list=g_vars)
    disc_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5).minimize(disc_cost, var_list=d_vars)
    clip_disc_weights = None
# WGAN
elif mode == 'wgan':
    gen_cost = -tf.reduce_mean(disc_fake)                               # generator loss
    disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)   # critic loss
    # Optimizers
    gen_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(gen_cost, var_list=g_vars)
    disc_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(disc_cost, var_list=d_vars)
    clip_ops = []
    # Clip the critic weights to [-0.01, 0.01]
    for var in train_vars:
        if var.name.startswith("discriminator"):
            clip_bounds = [-0.01, 0.01]
            clip_ops.append(tf.assign(var, tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])))
    clip_disc_weights = tf.group(*clip_ops)
# WGAN-GP
elif mode == 'wgan-gp':
    gen_cost = -tf.reduce_mean(disc_fake)                               # generator loss
    disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)   # critic loss
    # Gradient penalty on random interpolations between real and generated samples
    alpha = tf.random_uniform(shape=[batch_size, 1], minval=0., maxval=1.)
    interpolates = alpha * fake_data + (1 - alpha) * real_data
    gradients = tf.gradients(Discriminator(interpolates, reuse=True)[0], [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
    gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
    disc_cost += lamda * gradient_penalty
    clip_disc_weights = None
    # Optimizers
    gen_train_op = tf.train.AdamOptimizer(learning_rate=2e-4, beta1=0.5, beta2=0.9).minimize(gen_cost, var_list=g_vars)
    disc_train_op = tf.train.AdamOptimizer(learning_rate=2e-4, beta1=0.5, beta2=0.9).minimize(disc_cost, var_list=d_vars)
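# Sketch of the WGAN-GP critic objective implemented above:
#   L_D = E[D(G(z))] - E[D(x)] + lamda * E[(||grad_{x_hat} D(x_hat)||_2 - 1)^2]
# where x_hat = alpha * G(z) + (1 - alpha) * x is a random interpolation between generated
# and real samples.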
saver = tf.train.Saver()
def Train():
    losses = []
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for e in range(epochs):
            for i in xrange(len(data) // batch_size):
                batch_images = data[i*batch_size:(i+1)*batch_size]
                batch_images = batch_images.reshape(batch_size, 180)
                if noise != 'normal0_1':
                    batch_images = batch_images*2 - 1   # rescale [0, 1] data to [-1, 1]
                batch_noise = Get_noise(noise, 100)
                if mode == 'gan':   # vanilla GAN: one discriminator update per generator update
                    disc_iters = 1
                else:               # wgan / wgan-gp: several critic updates per generator update
                    disc_iters = 5
                for x in range(0, disc_iters):
                    _, d_loss, d_acc, d_fake = sess.run([disc_train_op, disc_cost, disc_prob_real, disc_prob_fake],
                                                        feed_dict={real_data: batch_images, noise_img: batch_noise})
                    if clip_disc_weights is not None:
                        _ = sess.run(clip_disc_weights)
                for k in range(1):
                    _, g_loss = sess.run([gen_train_op, gen_cost], feed_dict={noise_img: batch_noise})
                Save_lossValue(e, i, d_loss, g_loss)
                if i % 50 == 0:
                    print("epoch %d, batch %d, d_loss: %.8f, g_loss: %.8f, d_real_acc: %.8f, d_fake_acc: %.8f"
                          % (e, i, d_loss, g_loss, d_acc.mean(), d_fake.mean()))
                    losses.append((d_loss, g_loss))
            if e in (2, 4, 6, 8):
                if not os.path.exists('Manamge'):
                    os.makedirs('Manamge')
                saver.save(sess, 'Manamge/gan%d.ckpt' % e)
            sample_noise = Get_noise(noise, 100)
            gen_samples, gen_samples_prob = sess.run([fake_data, disc_prob_fake], feed_dict={noise_img: sample_noise})
            real_samples_prob = sess.run(disc_prob_real, feed_dict={real_data: batch_images, noise_img: sample_noise})
            print("mean probability assigned to real samples:\n", real_samples_prob.mean())
            print("mean probability assigned to fake samples:\n", gen_samples_prob.mean())
            if e % 1 == 0:
                gen = gen_samples.reshape(100, 3, 60, 1)
                gen = (gen + 1) / 2   # map tanh output back to [0, 1]
                gen_images = Manage_gen(gen)
                Save_genImages(gen_images, e)
        plot_loss(losses)
def Save_single(arr, mydir):
    # Save each sample as an individual image in `mydir`.
    data_images = arr
    data_images = data_images.reshape(-1, 3, 60)
    for i in range(1, len(arr)):
        plt.figure(figsize=(128, 128), dpi=1)
        plt.plot(data_images[i][0][0:30], data_images[i][0][30:60], color='blue', linewidth=300)
        plt.plot(data_images[i][1][0:30], data_images[i][1][30:60], color='red', linewidth=300)
        plt.plot(data_images[i][2][0:30], data_images[i][2][30:60], color='green', linewidth=300)
        plt.axis('off')
        plt.savefig(mydir + os.sep + str(i) + '.jpg', dpi=1)
        plt.close()
def Test():
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # saver.restore(sess, tf.train.latest_checkpoint("gan_checkpoints"))
        saver.restore(sess, 'Manamge/gan8.ckpt')
        sample_noise = Get_noise(noise, 2000)
        gen_samples, gen_samples_prob = sess.run([fake_data, disc_prob_fake], feed_dict={noise_img: sample_noise})
        prob = gen_samples_prob.mean(axis=1)   # per-sample mean probability
        gen_images = (gen_samples + 1) / 2
        gen_images = Manage_gen(gen_images)
        good = []
        bad = []
        for i in range(len(prob)):
            # bad 0
            pass
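# A possible entry point (hypothetical; adjust to taste): train first, then sample from a saved
# checkpoint such as 'Manamge/gan8.ckpt'.
if __name__ == '__main__':
    Train()
    # Test()   # requires an existing checkpoint such as 'Manamge/gan8.ckpt'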